| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 82–53.2k) | int64 (0–721) | string (lengths 91–41.9k) | int64 (0–699) | int64 (0–1) |
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline

if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING

if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
    from ..tf_utils import stable_softmax

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
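A minimal usage sketch of the pipeline above. The checkpoint is a real CLIP model on the Hub; `cat.png` is a hypothetical local file:

```python
from transformers import pipeline

classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
preds = classifier("cat.png", candidate_labels=["cat", "dog", "car"])
# e.g. [{'score': 0.98, 'label': 'cat'}, ...] -- postprocess() sorts scores in descending order
```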
| 329
|
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    """Weighted directed graph edge."""

    destination_vertex: int
    weight: int


class AdjacencyList:
    """Graph adjacency list."""

    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        """Get all the vertices adjacent to the given one."""
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int:
        """0-1 BFS: zero-weight edges go to the front of the deque, one-weight to the back."""
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")
        return distances[finish_vertex]
if __name__ == "__main__":
    import doctest

    doctest.testmod()
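A quick usage sketch of the 0-1 BFS class above (graph values chosen for illustration):

```python
g = AdjacencyList(3)
g.add_edge(0, 1, 0)  # zero-weight edges are explored first (appendleft)
g.add_edge(1, 2, 1)
g.add_edge(0, 2, 1)
assert g.get_shortest_path(0, 2) == 1  # both routes cost 1
```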
| 329
| 1
|
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block


@dataclass
class DecoderOutput(BaseOutput):
    """Output of decoding method."""

    sample: torch.FloatTensor
class Encoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        down_block_types=("DownEncoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        double_z=True,
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = torch.nn.Conv2d(
            in_channels,
            block_out_channels[0],
            kernel_size=3,
            stride=1,
            padding=1,
        )

        self.mid_block = None
        self.down_blocks = nn.ModuleList([])

        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=self.layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                add_downsample=not is_final_block,
                resnet_eps=1e-6,
                downsample_padding=0,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=None,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default",
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=None,
        )

        # out
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()

        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, x):
        sample = x
        sample = self.conv_in(sample)

        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            # down
            if is_torch_version(">=", "1.11.0"):
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(down_block), sample, use_reentrant=False
                    )
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, use_reentrant=False
                )
            else:
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample)
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample)
        else:
            # down
            for down_block in self.down_blocks:
                sample = down_block(sample)
            # middle
            sample = self.mid_block(sample)

        # post-process
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample
class Decoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        up_block_types=("UpDecoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        norm_type="group",  # group, spatial
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = nn.Conv2d(
            in_channels,
            block_out_channels[-1],
            kernel_size=3,
            stride=1,
            padding=1,
        )

        self.mid_block = None
        self.up_blocks = nn.ModuleList([])

        temb_channels = in_channels if norm_type == "spatial" else None

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default" if norm_type == "group" else norm_type,
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=temb_channels,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=self.layers_per_block + 1,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                prev_output_channel=None,
                add_upsample=not is_final_block,
                resnet_eps=1e-6,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=temb_channels,
                resnet_time_scale_shift=norm_type,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        if norm_type == "spatial":
            self.conv_norm_out = SpatialNorm(block_out_channels[0], temb_channels)
        else:
            self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, z, latent_embeds=None):
        sample = z
        sample = self.conv_in(sample)

        upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            if is_torch_version(">=", "1.11.0"):
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds, use_reentrant=False
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(up_block), sample, latent_embeds, use_reentrant=False
                    )
            else:
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds)
        else:
            # middle
            sample = self.mid_block(sample, latent_embeds)
            sample = sample.to(upscale_dtype)

            # up
            for up_block in self.up_blocks:
                sample = up_block(sample, latent_embeds)

        # post-process
        if latent_embeds is None:
            sample = self.conv_norm_out(sample)
        else:
            sample = self.conv_norm_out(sample, latent_embeds)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample
class VectorQuantizer(nn.Module):
    """
    Discretization bottleneck of a VQ-VAE. Supports post-hoc remapping of indices.
    """

    def __init__(self, n_e, vq_embed_dim, beta, remap=None, unknown_index="random", sane_index_shape=False, legacy=True):
        super().__init__()
        self.n_e = n_e
        self.vq_embed_dim = vq_embed_dim
        self.beta = beta
        self.legacy = legacy

        self.embedding = nn.Embedding(self.n_e, self.vq_embed_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)

        self.remap = remap
        if self.remap is not None:
            self.register_buffer("used", torch.tensor(np.load(self.remap)))
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(
                f"Remapping {self.n_e} indices to {self.re_embed} indices. "
                f"Using {self.unknown_index} for unknown indices."
            )
        else:
            self.re_embed = n_e

        self.sane_index_shape = sane_index_shape

    def remap_to_used(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1)
        unknown = match.sum(2) < 1
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape)

    def unmap_to_all(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        if self.re_embed > self.used.shape[0]:  # extra token
            inds[inds >= self.used.shape[0]] = 0  # simply set to zero
        back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
        return back.reshape(ishape)

    def forward(self, z):
        # reshape z -> (batch, height, width, channel) and flatten
        z = z.permute(0, 2, 3, 1).contiguous()
        z_flattened = z.view(-1, self.vq_embed_dim)
        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z

        min_encoding_indices = torch.argmin(torch.cdist(z_flattened, self.embedding.weight), dim=1)

        z_q = self.embedding(min_encoding_indices).view(z.shape)
        perplexity = None
        min_encodings = None

        # compute loss for embedding
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
        else:
            loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)

        # preserve gradients
        z_q = z + (z_q - z).detach()

        # reshape back to match original input shape
        z_q = z_q.permute(0, 3, 1, 2).contiguous()

        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1)  # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices)
            min_encoding_indices = min_encoding_indices.reshape(-1, 1)  # flatten

        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0], z_q.shape[2], z_q.shape[3])

        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)

    def get_codebook_entry(self, indices, shape):
        # shape specifying (batch, height, width, channel)
        if self.remap is not None:
            indices = indices.reshape(shape[0], -1)  # add batch axis
            indices = self.unmap_to_all(indices)
            indices = indices.reshape(-1)  # flatten again

        # get quantized latent vectors
        z_q = self.embedding(indices)

        if shape is not None:
            z_q = z_q.view(shape)
            # reshape back to match original input shape
            z_q = z_q.permute(0, 3, 1, 2).contiguous()

        return z_q
class DiagonalGaussianDistribution(object):
    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            self.var = self.std = torch.zeros_like(
                self.mean, device=self.parameters.device, dtype=self.parameters.dtype
            )

    def sample(self, generator=None):
        # make sure sample is on the same device as the parameters and has same dtype
        sample = randn_tensor(
            self.mean.shape, generator=generator, device=self.parameters.device, dtype=self.parameters.dtype
        )
        x = self.mean + self.std * sample
        return x

    def kl(self, other=None):
        if self.deterministic:
            return torch.Tensor([0.0])
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar, dim=[1, 2, 3])
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean, 2) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar,
                    dim=[1, 2, 3],
                )

    def nll(self, sample, dims=[1, 2, 3]):
        if self.deterministic:
            return torch.Tensor([0.0])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, dim=dims)

    def mode(self):
        return self.mean
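A small sketch of how the distribution class above is typically used, assuming a VAE encoder that stacks mean and log-variance on the channel axis (shapes are illustrative, not prescriptive):

```python
import torch

moments = torch.randn(1, 8, 16, 16)  # 4 mean channels + 4 logvar channels
dist = DiagonalGaussianDistribution(moments)
latent = dist.sample()  # (1, 4, 16, 16): mean + std * noise
kl = dist.kl()          # per-sample KL divergence to a standard normal
```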
| 709
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
    import numpy as np
    import tensorflow as tf

    from transformers import TFXLMRobertaModel


@require_tf
@require_sentencepiece
@require_tokenizers
class TFXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFXLMRobertaModel.from_pretrained("jplu/tf-xlm-roberta-base")

        features = {
            "input_ids": tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]], dtype=tf.int32),  # "My dog is cute"
            "attention_mask": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]], dtype=tf.int32),
        }

        output = model(features)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [0.0681762, 0.10894451, 0.06772504],
                    [-0.06423668, 0.02366615, 0.04329344],
                    [-0.06057295, 0.09974135, -0.00070584],
                ]
            ],
            dtype=tf.float32,
        )

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 231
| 0
|
'''simple docstring'''
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """
    Find the maximum sum of non-adjacent elements in nums.
    """
    if not nums:
        return 0
    max_including, max_excluding = nums[0], 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
if __name__ == "__main__":
    import doctest

    doctest.testmod()
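Two quick hand-verified checks of the recurrence above:

```python
assert maximum_non_adjacent_sum([1, 2, 4, 5]) == 7  # picks 2 and 5
assert maximum_non_adjacent_sum([]) == 0
```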
| 158
|
from PIL import Image
def change_brightness(img: Image, level: float) -> Image:
    """
    Change the brightness of a PIL Image to a given level.
    """

    def brightness(c: int) -> float:
        """
        Fundamental transformation, applied to every pixel value.
        """
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        bright_img = change_brightness(img, 100)
        bright_img.save("image_data/lena_brightness.png", format="png")
| 385
| 0
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """
    Creates a set of `DataLoader`s for the `glue` dataset, using "bert-base-cased" as the tokenizer.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather((predictions, batch["labels"]))
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
    main()
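As the comment in the eval loop notes, `Accelerator.gather_for_metrics` performs the same last-batch truncation automatically. A minimal sketch of the equivalent loop, reusing the variables from the script above (assumes a recent accelerate release that provides the method):

```python
model.eval()
for batch in eval_dataloader:
    with torch.no_grad():
        outputs = model(**batch)
    predictions = outputs.logits.argmax(dim=-1)
    # gathers across processes and drops duplicated samples from the last batch
    predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
    metric.add_batch(predictions=predictions, references=references)
```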
| 303
|
def reverse_long_words(sentence: str) -> str:
    """
    Reverse all words that are longer than 4 characters in a sentence.
    """
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(reverse_long_words("Hey wollef sroirraw"))
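The demo string decodes as expected:

```python
assert reverse_long_words("Hey wollef sroirraw") == "Hey fellow warriors"
```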
| 303
| 1
|
'''simple docstring'''
def odd_even_sort(input_list: list) -> list:
    """
    Odd-even (brick) sort: repeatedly compare/swap all even-indexed pairs,
    then all odd-indexed pairs, until a full pass makes no swap.
    """
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False

        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
    return input_list


if __name__ == "__main__":
    print("Enter list to be sorted")
    input_list = [int(x) for x in input().split()]
    # inputing elements of the list in one line
    sorted_list = odd_even_sort(input_list)
    print("The sorted list is")
    print(sorted_list)
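A quick check of the function above (odd-even sort is O(n^2) in the worst case, like bubble sort):

```python
assert odd_even_sort([5, 3, 1, 2, 4]) == [1, 2, 3, 4, 5]
```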
| 577
|
'''simple docstring'''
def selection_sort(collection: list) -> list:
    """
    Selection sort: repeatedly select the smallest remaining element
    and swap it into place.
    """
    length = len(collection)
    for i in range(length - 1):
        least = i
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least], collection[i] = (collection[i], collection[least])
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(selection_sort(unsorted))
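A quick check of the function above:

```python
assert selection_sort([64, 25, 12, 22, 11]) == [11, 12, 22, 25, 64]
```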
| 577
| 1
|
"""simple docstring"""
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_t5 import T5Tokenizer
else:
    T5Tokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
        "t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
        "t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
        "t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
        "t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "t5-small": "https://huggingface.co/t5-small/resolve/main/tokenizer.json",
        "t5-base": "https://huggingface.co/t5-base/resolve/main/tokenizer.json",
        "t5-large": "https://huggingface.co/t5-large/resolve/main/tokenizer.json",
        "t5-3b": "https://huggingface.co/t5-3b/resolve/main/tokenizer.json",
        "t5-11b": "https://huggingface.co/t5-11b/resolve/main/tokenizer.json",
    },
}

# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "t5-small": 512,
    "t5-base": 512,
    "t5-large": 512,
    "t5-3b": 512,
    "t5-11b": 512,
}
class T5TokenizerFast(PreTrainedTokenizerFast):
    """
    Construct a "fast" T5 tokenizer (backed by HuggingFace's *tokenizers* library).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = T5Tokenizer

    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=100,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id_" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens"
                )

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        self.extra_ids = extra_ids

    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        if pretrained_model_name_or_path in T5TokenizerFast.max_model_input_sizes:
            deprecated_max_model_length = T5TokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )

        return max_model_length

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
            logger.info(f"Copy vocab file to {out_vocab_file}")

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        token_ids_0 = token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0
        else:
            token_ids_1 = token_ids_1 + [self.eos_token_id]
            return self.prefix_tokens + token_ids_0 + token_ids_1

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda x: bool(re.search(r"<extra_id_\d+>", x)) is not None, self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        return [self.convert_tokens_to_ids(token) for token in self.get_sentinel_tokens()]
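A short usage sketch for the fast tokenizer above (`t5-small` is a real Hub checkpoint):

```python
from transformers import T5TokenizerFast

tok = T5TokenizerFast.from_pretrained("t5-small")
ids = tok("translate English to German: Hello", return_tensors="pt").input_ids
# build_inputs_with_special_tokens appends EOS to every encoded sequence
assert ids[0, -1].item() == tok.eos_token_id
```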
| 310
|
"""simple docstring"""
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
__a = ""
__a = ""
__a = ""
__a = 1 # (0 is vertical, 1 is horizontal)
def A_ ( ):
'''simple docstring'''
snake_case_, snake_case_ :Optional[Any] = get_dataset(_lowercase, _lowercase )
print("""Processing...""" )
snake_case_, snake_case_, snake_case_ :str = update_image_and_anno(_lowercase, _lowercase, _lowercase )
for index, image in enumerate(_lowercase ):
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
snake_case_ :str = random_chars(32 )
snake_case_ :Optional[Any] = paths[index].split(os.sep )[-1].rsplit(""".""", 1 )[0]
snake_case_ :List[str] = f"""{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"""
cva.imwrite(f"""/{file_root}.jpg""", _lowercase, [cva.IMWRITE_JPEG_QUALITY, 85] )
print(f"""Success {index+1}/{len(_lowercase )} with {file_name}""" )
snake_case_ :int = []
for anno in new_annos[index]:
snake_case_ :Union[str, Any] = f"""{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"""
annos_list.append(_lowercase )
with open(f"""/{file_root}.txt""", """w""" ) as outfile:
outfile.write("""\n""".join(line for line in annos_list ) )
def A_ ( _lowercase, _lowercase ):
'''simple docstring'''
snake_case_ :Dict = []
snake_case_ :List[Any] = []
for label_file in glob.glob(os.path.join(_lowercase, """*.txt""" ) ):
snake_case_ :List[str] = label_file.split(os.sep )[-1].rsplit(""".""", 1 )[0]
with open(_lowercase ) as in_file:
snake_case_ :Any = in_file.readlines()
snake_case_ :Any = os.path.join(_lowercase, f"""{label_name}.jpg""" )
snake_case_ :int = []
for obj_list in obj_lists:
snake_case_ :List[Any] = obj_list.rstrip("""\n""" ).split(""" """ )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
img_paths.append(_lowercase )
labels.append(_lowercase )
return img_paths, labels
def A_ ( _lowercase, _lowercase, _lowercase = 1 ):
'''simple docstring'''
snake_case_ :Union[str, Any] = []
snake_case_ :Optional[int] = []
snake_case_ :Any = []
for idx in range(len(_lowercase ) ):
snake_case_ :Union[str, Any] = []
snake_case_ :List[Any] = img_list[idx]
path_list.append(_lowercase )
snake_case_ :List[str] = anno_list[idx]
snake_case_ :Optional[Any] = cva.imread(_lowercase )
if flip_type == 1:
snake_case_ :Optional[Any] = cva.flip(_lowercase, _lowercase )
for bbox in img_annos:
snake_case_ :str = 1 - bbox[1]
new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
elif flip_type == 0:
snake_case_ :List[str] = cva.flip(_lowercase, _lowercase )
for bbox in img_annos:
snake_case_ :Optional[Any] = 1 - bbox[2]
new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
new_annos_lists.append(_lowercase )
new_imgs_list.append(_lowercase )
return new_imgs_list, new_annos_lists, path_list
def A_ ( _lowercase = 32 ):
'''simple docstring'''
assert number_char > 1, "The number of character should greater than 1"
snake_case_ :Any = ascii_lowercase + digits
return "".join(random.choice(_lowercase ) for _ in range(_lowercase ) )
if __name__ == "__main__":
main()
print("DONE ✅")
| 310
| 1
|
import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        """
        k: empirically determined constant, usually 0.04 or 0.06
        window_size: neighbourhood considered around each pixel
        """
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self):
        return str(self.k)

    def detect(self, img_path: str):
        """
        Returns the image with corners marked in red and a list of [x, y, response].
        """
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = self.k
        offset = self.window_size // 2

        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()

                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, corner_list = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
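For comparison, OpenCV ships a vectorized Harris detector that computes the same response R = det(M) - k * trace(M)^2. A minimal sketch, reusing the same hypothetical image path as above:

```python
import cv2
import numpy as np

gray = np.float32(cv2.imread("path_to_image", 0))
response = cv2.cornerHarris(gray, blockSize=3, ksize=3, k=0.04)
corners = np.argwhere(response > 0.01 * response.max())  # (row, col) candidates
```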
| 114
|
import warnings

from ...utils import logging
from .image_processing_deit import DeiTImageProcessor

logger = logging.get_logger(__name__)


class DeiTFeatureExtractor(DeiTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DeiTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 114
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_UpperCamelCase : Any = {
"configuration_data2vec_audio": ["DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecAudioConfig"],
"configuration_data2vec_text": [
"DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Data2VecTextConfig",
"Data2VecTextOnnxConfig",
],
"configuration_data2vec_vision": [
"DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Data2VecVisionConfig",
"Data2VecVisionOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : Union[str, Any] = [
"DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecAudioForAudioFrameClassification",
"Data2VecAudioForCTC",
"Data2VecAudioForSequenceClassification",
"Data2VecAudioForXVector",
"Data2VecAudioModel",
"Data2VecAudioPreTrainedModel",
]
_UpperCamelCase : int = [
"DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecTextForCausalLM",
"Data2VecTextForMaskedLM",
"Data2VecTextForMultipleChoice",
"Data2VecTextForQuestionAnswering",
"Data2VecTextForSequenceClassification",
"Data2VecTextForTokenClassification",
"Data2VecTextModel",
"Data2VecTextPreTrainedModel",
]
_UpperCamelCase : Any = [
"DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecVisionForImageClassification",
"Data2VecVisionForMaskedImageModeling",
"Data2VecVisionForSemanticSegmentation",
"Data2VecVisionModel",
"Data2VecVisionPreTrainedModel",
]
if is_tf_available():
_UpperCamelCase : Optional[int] = [
"TFData2VecVisionForImageClassification",
"TFData2VecVisionForSemanticSegmentation",
"TFData2VecVisionModel",
"TFData2VecVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig
from .configuration_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecTextConfig,
DataaVecTextOnnxConfig,
)
from .configuration_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecVisionConfig,
DataaVecVisionOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dataavec_audio import (
DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecAudioForAudioFrameClassification,
DataaVecAudioForCTC,
DataaVecAudioForSequenceClassification,
DataaVecAudioForXVector,
DataaVecAudioModel,
DataaVecAudioPreTrainedModel,
)
from .modeling_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecTextForCausalLM,
DataaVecTextForMaskedLM,
DataaVecTextForMultipleChoice,
DataaVecTextForQuestionAnswering,
DataaVecTextForSequenceClassification,
DataaVecTextForTokenClassification,
DataaVecTextModel,
DataaVecTextPreTrainedModel,
)
from .modeling_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecVisionForImageClassification,
DataaVecVisionForMaskedImageModeling,
DataaVecVisionForSemanticSegmentation,
DataaVecVisionModel,
DataaVecVisionPreTrainedModel,
)
if is_tf_available():
from .modeling_tf_dataavec_vision import (
TFDataaVecVisionForImageClassification,
TFDataaVecVisionForSemanticSegmentation,
TFDataaVecVisionModel,
TFDataaVecVisionPreTrainedModel,
)
else:
import sys
_UpperCamelCase : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
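With the lazy module wired into `sys.modules`, top-level attributes import their submodule only on first access. A short illustration (no weights are downloaded when only a config is built):

```python
from transformers import Data2VecTextConfig

config = Data2VecTextConfig()  # resolves configuration_data2vec_text lazily
```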
| 721
|
"""simple docstring"""
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
DATASETS_ON_HF_GCP = [
{"dataset": "wikipedia", "config_name": "20220301.de"},
{"dataset": "wikipedia", "config_name": "20220301.en"},
{"dataset": "wikipedia", "config_name": "20220301.fr"},
{"dataset": "wikipedia", "config_name": "20220301.frr"},
{"dataset": "wikipedia", "config_name": "20220301.it"},
{"dataset": "wikipedia", "config_name": "20220301.simple"},
{"dataset": "snli", "config_name": "plain_text"},
{"dataset": "eli5", "config_name": "LFQA_reddit"},
{"dataset": "wiki40b", "config_name": "en"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.compressed"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.no_index"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.multiset.no_index"},
{"dataset": "natural_questions", "config_name": "default"},
]
def list_datasets_on_hf_gcp_parameters(with_config=True):
    if with_config:
        return [
            {
                "testcase_name": d["dataset"] + "/" + d["config_name"],
                "dataset": d["dataset"],
                "config_name": d["config_name"],
            }
            for d in DATASETS_ON_HF_GCP
        ]
    else:
        return [
            {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
        ]


@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True))
class TestDatasetOnHfGcp(TestCase):
    dataset = None
    config_name = None

    def test_dataset_info_available(self, dataset, config_name):
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset, cache_dir=tmp_dir)
            builder_cls = import_main_class(dataset_module.module_path, dataset=True)
            builder_instance: DatasetBuilder = builder_cls(
                cache_dir=tmp_dir,
                config_name=config_name,
                hash=dataset_module.hash,
            )
            dataset_info_url = "/".join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False).replace(os.sep, "/"),
                    config.DATASET_INFO_FILENAME,
                ]
            )
            dataset_info_path = cached_path(dataset_info_url, cache_dir=tmp_dir)
            self.assertTrue(os.path.exists(dataset_info_path))


@pytest.mark.integration
def test_as_dataset_from_hf_gcs(tmp_path_factory):
    tmp_dir = tmp_path_factory.mktemp("test_hf_gcp") / "test_wikipedia_simple"
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_dir)
    builder_cls = import_main_class(dataset_module.module_path)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_dir,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds


@pytest.mark.integration
def test_as_streaming_dataset_from_hf_gcs(tmp_path):
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_path)
    builder_cls = import_main_class(dataset_module.module_path, dataset=True)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_path,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds, IterableDatasetDict)
    assert "train" in ds
    assert isinstance(ds["train"], IterableDataset)
    assert next(iter(ds["train"]))
| 645
| 0
|
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
    import textwrap

    from PIL import Image, ImageDraw, ImageFont

if is_torch_available():
    import torch

    from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False

logger = logging.get_logger(__name__)

DEFAULT_FONT_PATH = "ybelkada/fonts"


def _check_torch_version():
    if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
        raise ImportError(
            f"You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use "
            "Pix2StructImageProcessor. Please upgrade torch."
        )


def torch_extract_patches(image_tensor, patch_height, patch_width):
    requires_backends(torch_extract_patches, ["torch"])
    _check_torch_version()

    image_tensor = image_tensor.unsqueeze(0)
    patches = torch.nn.functional.unfold(image_tensor, (patch_height, patch_width), stride=(patch_height, patch_width))
    patches = patches.reshape(image_tensor.size(0), image_tensor.size(1), patch_height, patch_width, -1)
    patches = patches.permute(0, 4, 2, 3, 1).reshape(
        image_tensor.size(2) // patch_height,
        image_tensor.size(3) // patch_width,
        image_tensor.size(1) * patch_height * patch_width,
    )
    return patches.unsqueeze(0)
def render_text(
    text: str,
    text_size: int = 36,
    text_color: str = "black",
    background_color: str = "white",
    left_padding: int = 5,
    right_padding: int = 5,
    top_padding: int = 5,
    bottom_padding: int = 5,
    font_bytes: Optional[bytes] = None,
    font_path: Optional[str] = None,
):
    requires_backends(render_text, "vision")
    # Add new lines so that each line is no more than 80 characters.
    wrapper = textwrap.TextWrapper(width=80)
    lines = wrapper.wrap(text=text)
    wrapped_text = "\n".join(lines)

    if font_bytes is not None and font_path is None:
        font = io.BytesIO(font_bytes)
    elif font_path is not None:
        font = font_path
    else:
        font = hf_hub_download(DEFAULT_FONT_PATH, "Arial.TTF")
    font = ImageFont.truetype(font, encoding="UTF-8", size=text_size)

    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    temp_draw = ImageDraw.Draw(Image.new("RGB", (1, 1), background_color))
    _, _, text_width, text_height = temp_draw.textbbox((0, 0), wrapped_text, font)

    # Create the actual image with a bit of padding around the text.
    image_width = text_width + left_padding + right_padding
    image_height = text_height + top_padding + bottom_padding
    image = Image.new("RGB", (image_width, image_height), background_color)
    draw = ImageDraw.Draw(image)
    draw.text(xy=(left_padding, top_padding), text=wrapped_text, fill=text_color, font=font)
    return image


def render_header(image: np.ndarray, header: str, **kwargs):
    requires_backends(render_header, "vision")

    # Convert to PIL image if necessary
    image = to_pil_image(image)

    header_image = render_text(header, **kwargs)
    new_width = max(header_image.width, image.width)

    new_height = int(image.height * (new_width / image.width))
    new_header_height = int(header_image.height * (new_width / header_image.width))

    new_image = Image.new("RGB", (new_width, new_height + new_header_height), "white")
    new_image.paste(header_image.resize((new_width, new_header_height)), (0, 0))
    new_image.paste(image.resize((new_width, new_height)), (0, new_header_height))

    # Convert back to the original framework if necessary
    new_image = to_numpy_array(new_image)

    if infer_channel_dimension_format(new_image) == ChannelDimension.LAST:
        new_image = to_channel_dimension_format(new_image, ChannelDimension.LAST)

    return new_image
class snake_case ( UpperCamelCase_ ):
lowercase_ = ['flattened_patches']
def __init__( self : List[Any] , a_ : bool = True , a_ : bool = True , a_ : Dict[str, int] = None , a_ : int = 2048 , a_ : bool = False , **a_ : List[Any] , )-> None:
"""simple docstring"""
super().__init__(**a_ )
SCREAMING_SNAKE_CASE__ : Tuple = patch_size if patch_size is not None else {'height': 16, 'width': 16}
SCREAMING_SNAKE_CASE__ : Union[str, Any] = do_normalize
SCREAMING_SNAKE_CASE__ : Dict = do_convert_rgb
SCREAMING_SNAKE_CASE__ : int = max_patches
SCREAMING_SNAKE_CASE__ : Union[str, Any] = is_vqa
    def extract_flattened_patches(self, image: np.ndarray, max_patches: int, patch_size: dict, **kwargs) -> np.ndarray:
        """
        Resizes the image so it tiles into at most `max_patches` patches, then flattens each
        patch and prepends its (row, column) position.
        """
        requires_backends(self.extract_flattened_patches, 'torch')
        _check_torch_version()

        # convert to torch
        image = to_channel_dimension_format(image, ChannelDimension.FIRST)
        image = torch.from_numpy(image)

        patch_height, patch_width = patch_size['height'], patch_size['width']
        image_height, image_width = get_image_size(image)

        # maximize scale s.t. the rescaled image still fits into `max_patches` patches
        scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
        num_feasible_rows = max(min(math.floor(scale * image_height / patch_height), max_patches), 1)
        num_feasible_cols = max(min(math.floor(scale * image_width / patch_width), max_patches), 1)
        resized_height = max(num_feasible_rows * patch_height, 1)
        resized_width = max(num_feasible_cols * patch_width, 1)

        image = torch.nn.functional.interpolate(
            image.unsqueeze(0),
            size=(resized_height, resized_width),
            mode='bilinear',
            align_corners=False,
            antialias=True,
        ).squeeze(0)

        # [1, rows, columns, patch_height * patch_width * image_channels]
        patches = torch_extract_patches(image, patch_height, patch_width)

        patches_shape = patches.shape
        rows = patches_shape[1]
        columns = patches_shape[2]
        depth = patches_shape[3]

        # [rows * columns, patch_height * patch_width * image_channels]
        patches = patches.reshape([rows * columns, depth])

        # [rows * columns, 1]
        row_ids = torch.arange(rows).reshape([rows, 1]).repeat(1, columns).reshape([rows * columns, 1])
        col_ids = torch.arange(columns).reshape([1, columns]).repeat(rows, 1).reshape([rows * columns, 1])

        # Offset by 1 so the ids do not contain zeros, which represent padding.
        row_ids += 1
        col_ids += 1

        # Prepare additional patch features.
        # [rows * columns, 1]
        row_ids = row_ids.to(torch.float32)
        col_ids = col_ids.to(torch.float32)

        # [rows * columns, 2 + patch_height * patch_width * image_channels]
        result = torch.cat([row_ids, col_ids, patches], -1)

        # [max_patches, 2 + patch_height * patch_width * image_channels]
        result = torch.nn.functional.pad(result, [0, 0, 0, max_patches - (rows * columns)]).float()

        result = to_numpy_array(result)

        return result
    def normalize(
        self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs
    ) -> np.ndarray:
        """Normalizes an image with its own mean and (floored) standard deviation."""
        if image.dtype == np.uint8:
            image = image.astype(np.float32)

        # take mean and std across the whole `image`
        mean = np.mean(image)
        std = np.std(image)
        adjusted_stddev = max(std, 1.0 / math.sqrt(np.prod(image.shape)))

        return normalize(image, mean=mean, std=adjusted_stddev, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        header_text: Optional[str] = None,
        do_convert_rgb: bool = None,
        do_normalize: Optional[bool] = None,
        max_patches: Optional[int] = None,
        patch_size: Optional[Dict[str, int]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> ImageInput:
        """Preprocesses one or several images into flattened patches plus an attention mask."""
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        patch_size = patch_size if patch_size is not None else self.patch_size
        max_patches = max_patches if max_patches is not None else self.max_patches
        is_vqa = self.is_vqa

        if kwargs.get('data_format', None) is not None:
            raise ValueError('data_format is not an accepted input as the outputs are always flattened patches.')

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.'
            )

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if is_vqa:
            if header_text is None:
                raise ValueError('A header text must be provided for VQA models.')
            font_bytes = kwargs.pop('font_bytes', None)
            font_path = kwargs.pop('font_path', None)

            if isinstance(header_text, str):
                header_text = [header_text] * len(images)

            images = [
                render_header(image, header_text[i], font_bytes=font_bytes, font_path=font_path)
                for i, image in enumerate(images)
            ]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        # convert to torch tensor and permute
        images = [
            self.extract_flattened_patches(image=image, max_patches=max_patches, patch_size=patch_size)
            for image in images
        ]

        # create attention mask in numpy
        attention_masks = [(image.sum(axis=-1) != 0).astype(np.float32) for image in images]

        encoded_outputs = BatchFeature(
            data={'flattened_patches': images, 'attention_mask': attention_masks}, tensor_type=return_tensors
        )

        return encoded_outputs
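
# A minimal usage sketch of the processor above. The class and argument names follow the
# code as reconstructed here; the random input image and the commented shapes are
# illustrative assumptions, not verified outputs.
#
#     import numpy as np
#
#     processor = Pix2StructImageProcessor(patch_size={'height': 16, 'width': 16}, max_patches=2048)
#     dummy_image = np.random.randint(0, 256, size=(224, 224, 3), dtype=np.uint8)
#     encoded = processor.preprocess(images=dummy_image, return_tensors='np')
#     # encoded['flattened_patches']: (1, 2048, 2 + 16 * 16 * 3)
#     # encoded['attention_mask']:    (1, 2048)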
def actual_power(a: int, b: int) -> int:
    """
    Recursive divide-and-conquer exponentiation helper.
    `int(b / 2)` truncates toward zero, so negative exponents also terminate.
    """
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))


def power(a: int, b: int) -> float:
    """Computes a**b, falling back to the reciprocal for negative exponents."""
    if b < 0:
        return 1 / actual_power(a, b)
    return actual_power(a, b)


if __name__ == "__main__":
    print(power(-2, -3))  # -0.125
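
# The helper above recomputes the same half-power twice, so it still performs O(b)
# multiplications. A sketch of the usual fix, exponentiation by squaring, which reuses
# the half-power and needs only O(log b) multiplications (added here for illustration):
def fast_power(a: int, b: int) -> float:
    if b < 0:
        return 1 / fast_power(a, -b)
    if b == 0:
        return 1
    half = fast_power(a, b // 2)  # computed once, then squared
    return half * half if b % 2 == 0 else a * half * half


if __name__ == "__main__":
    assert fast_power(-2, -3) == power(-2, -3) == -0.125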
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

_import_structure = {
    'configuration_time_series_transformer': [
        'TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'TimeSeriesTransformerConfig',
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_time_series_transformer'] = [
        'TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TimeSeriesTransformerForPrediction',
        'TimeSeriesTransformerModel',
        'TimeSeriesTransformerPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_time_series_transformer import (
        TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TimeSeriesTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_time_series_transformer import (
            TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimeSeriesTransformerForPrediction,
            TimeSeriesTransformerModel,
            TimeSeriesTransformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
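
# How the lazy structure above behaves in practice (a sketch; the config field used in the
# last line is an assumption for illustration): importing the package only materializes the
# cheap configuration module, while the torch-backed modeling module is loaded by
# `_LazyModule` on first attribute access.
#
#     from transformers.models.time_series_transformer import TimeSeriesTransformerConfig
#
#     config = TimeSeriesTransformerConfig(prediction_length=24)  # no torch import triggered yet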
import coval  # From: git+https://github.com/ns-moosavi/coval.git  # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator

import datasets

logger = datasets.logging.get_logger(__name__)

_CITATION = '''\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",
author = "Moosavi, Nafise Sadat and
Strube, Michael",
booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = aug,
year = "2016",
address = "Berlin, Germany",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/P16-1060",
doi = "10.18653/v1/P16-1060",
pages = "632--642",
}
'''
_DESCRIPTION = '''\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements of the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only works with the CoNLL line format:
The CoNLL format has one word per line with all the annotation for this word in columns separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5 Part-of-Speech
6   Parse bit   This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterisk with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11 Named Entities These columns identifies the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More information on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
'''
_KWARGS_DESCRIPTION = '''
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, parse bit and coreference annotation)
See the details on the format in the description of the metric.
references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, parse bit and coreference annotation)
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions of key or system files,
mentions whose corresponding coreference chain is of size one,
are considered as singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting \'keep_singletons=False\', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
\'mentions\': mentions
\'muc\': MUC metric [Vilain et al, 1995]
\'bcub\': B-cubed [Bagga and Baldwin, 1998]
\'ceafe\': CEAFe [Luo et al., 2005]
\'lea\': LEA [Moosavi and Strube, 2016]
\'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric(\'coval\')
>>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',
... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',
... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',
... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',
... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',
... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}
'''
def get_coref_infos(
    key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"
):
    """Extracts mention clusters and mention-to-cluster assignments for one document."""
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}

    doc_coref_infos = {}

    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0

    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num

    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)

    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num

    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)

    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters

        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters

    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)

    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)

    if remove_nested:
        logger.info(
            "Number of removed nested coreferring mentions in the key "
            f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}"
        )
        logger.info(
            "Number of resulting singleton clusters in the key "
            f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}"
        )

    if not keep_singletons:
        logger.info(
            f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
            "files, respectively"
        )

    return doc_coref_infos
def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    """Scores the system output against the key with each requested metric."""
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)

    output_scores = {}
    conll = 0
    conll_subparts_num = 0

    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": f1})

        logger.info(
            name.ljust(10),
            f"Recall: {recall * 100:.2f}",
            f" Precision: {precision * 100:.2f}",
            f" F1: {f1 * 100:.2f}",
        )

    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f"CoNLL score: {conll:.2f}")
        output_scores.update({"conll_score": conll})

    return output_scores
def check_gold_parse_annotation(key_lines):
    """Returns True if the key file carries gold parse information in column 6."""
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith("#"):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Coval(datasets.Metric):
    """CoVal coreference evaluation metric wrapper."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Sequence(datasets.Value('string')),
                    'references': datasets.Sequence(datasets.Value('string')),
                }
            ),
            codebase_urls=['https://github.com/ns-moosavi/coval'],
            reference_urls=[
                'https://github.com/ns-moosavi/coval',
                'https://www.aclweb.org/anthology/P16-1060',
                'http://www.conll.cemantix.org/2012/data.html',
            ],
        )

    def _compute(
        self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False
    ):
        all_metrics = [
            ('mentions', evaluator.mentions),
            ('muc', evaluator.muc),
            ('bcub', evaluator.b_cubed),
            ('ceafe', evaluator.ceafe),
            ('lea', evaluator.lea),
        ]

        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use 'min_span'.")
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"

        score = evaluate(
            key_lines=references,
            sys_lines=predictions,
            metrics=all_metrics,
            NP_only=NP_only,
            remove_nested=remove_nested,
            keep_singletons=keep_singletons,
            min_span=min_span,
        )

        return score
"""simple docstring"""
import math
from datetime import datetime, timedelta
def _snake_case ( _snake_case : int ) -> datetime:
'''simple docstring'''
_A = year % 19
_A = year % 4
_A = year % 7
_A = math.floor(year / 1_00 )
_A = math.floor((13 + 8 * leap_day_inhibits) / 25 )
_A = leap_day_inhibits / 4
_A = (
15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
) % 30
_A = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
# days to be added to March 21
_A = (19 * metonic_cycle + secular_moon_shift) % 30
# PHM -> Paschal Full Moon
_A = (
2 * julian_leap_year
+ 4 * non_leap_year
+ 6 * days_to_add
+ century_starting_point
) % 7
if days_to_add == 29 and days_from_phm_to_sunday == 6:
return datetime(_snake_case , 4 , 19 )
elif days_to_add == 28 and days_from_phm_to_sunday == 6:
return datetime(_snake_case , 4 , 18 )
else:
return datetime(_snake_case , 3 , 22 ) + timedelta(
days=int(days_to_add + days_from_phm_to_sunday ) )
if __name__ == "__main__":
for year in (1_994, 2_000, 2_010, 2_021, 2_023):
a = '''will be''' if year > datetime.now().year else '''was'''
print(F'''Easter in {year} {tense} {gauss_easter(year)}''')
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.nn as nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block


@dataclass
class UNet1DOutput(BaseOutput):
    """Output of `UNet1DModel`: the denoised sample of shape (batch, channels, length)."""

    sample: torch.FloatTensor


class UNet1DModel(ModelMixin, ConfigMixin):
    """A 1D UNet that takes a noisy sample and a timestep and returns a sample-shaped output."""

    @register_to_config
    def __init__(
        self,
        sample_size: int = 65_536,
        sample_rate: Optional[int] = None,
        in_channels: int = 2,
        out_channels: int = 2,
        extra_in_channels: int = 0,
        time_embedding_type: str = "fourier",
        flip_sin_to_cos: bool = True,
        use_timestep_embedding: bool = False,
        freq_shift: float = 0.0,
        down_block_types: Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
        up_block_types: Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        mid_block_type: str = "UNetMidBlock1D",
        out_block_type: str = None,
        block_out_channels: Tuple[int] = (32, 32, 64),
        act_fn: str = None,
        norm_num_groups: int = 8,
        layers_per_block: int = 1,
        downsample_each_block: bool = False,
    ):
        super().__init__()
        self.sample_size = sample_size

        # time
        if time_embedding_type == "fourier":
            self.time_proj = GaussianFourierProjection(
                embedding_size=8, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos
            )
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            self.time_proj = Timesteps(
                block_out_channels[0], flip_sin_to_cos=flip_sin_to_cos, downscale_freq_shift=freq_shift
            )
            timestep_input_dim = block_out_channels[0]

        if use_timestep_embedding:
            time_embed_dim = block_out_channels[0] * 4
            self.time_mlp = TimestepEmbedding(
                in_channels=timestep_input_dim,
                time_embed_dim=time_embed_dim,
                act_fn=act_fn,
                out_dim=block_out_channels[0],
            )

        self.down_blocks = nn.ModuleList([])
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])
        self.out_block = None

        # down
        output_channel = in_channels
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]

            if i == 0:
                input_channel += extra_in_channels

            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_downsample=not is_final_block or downsample_each_block,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = get_mid_block(
            mid_block_type,
            in_channels=block_out_channels[-1],
            mid_channels=block_out_channels[-1],
            out_channels=block_out_channels[-1],
            embed_dim=block_out_channels[0],
            num_layers=layers_per_block,
            add_downsample=downsample_each_block,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        if out_block_type is None:
            final_upsample_channels = out_channels
        else:
            final_upsample_channels = block_out_channels[0]

        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = (
                reversed_block_out_channels[i + 1] if i < len(up_block_types) - 1 else final_upsample_channels
            )

            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=layers_per_block,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_upsample=not is_final_block,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32)
        self.out_block = get_out_block(
            out_block_type=out_block_type,
            num_groups_out=num_groups_out,
            embed_dim=block_out_channels[0],
            out_channels=out_channels,
            act_fn=act_fn,
            fc_dim=block_out_channels[-1] // 4,
        )

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        return_dict: bool = True,
    ) -> Union[UNet1DOutput, Tuple]:
        # 1. time
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)

        timestep_embed = self.time_proj(timesteps)
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed)
        else:
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))

        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample, res_samples = downsample_block(hidden_states=sample, temb=timestep_embed)
            down_block_res_samples += res_samples

        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample, timestep_embed)

        # 4. up
        for i, upsample_block in enumerate(self.up_blocks):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample, res_hidden_states_tuple=res_samples, temb=timestep_embed)

        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample, timestep_embed)

        if not return_dict:
            return (sample,)

        return UNet1DOutput(sample=sample)
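
# A smoke-test sketch for the model above (the shapes are illustrative; they only need to
# be compatible with the default block configuration):
#
#     model = UNet1DModel()                      # defaults: sample_size=65536, 2 channels
#     sample = torch.randn(1, 2, 65536)          # (batch, channels, length)
#     timestep = torch.tensor([10])
#     out = model(sample, timestep)
#     assert out.sample.shape == sample.shape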
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union

import pandas as pd
import pyarrow as pa

import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal

logger = datasets.utils.logging.get_logger(__name__)

_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ['names', 'prefix']
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ['warn_bad_lines', 'error_bad_lines', 'mangle_dupe_cols']
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ['encoding_errors', 'on_bad_lines']
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ['date_format']
@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV."""

    sep: str = ','
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = 'infer'
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Dict[Union[int, str], Callable[[Any], Any]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None

    def __post_init__(self):
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names
    @property
    def pd_read_csv_kwargs(self):
        pd_read_csv_kwargs = {
            'sep': self.sep,
            'header': self.header,
            'names': self.names,
            'index_col': self.index_col,
            'usecols': self.usecols,
            'prefix': self.prefix,
            'mangle_dupe_cols': self.mangle_dupe_cols,
            'engine': self.engine,
            'converters': self.converters,
            'true_values': self.true_values,
            'false_values': self.false_values,
            'skipinitialspace': self.skipinitialspace,
            'skiprows': self.skiprows,
            'nrows': self.nrows,
            'na_values': self.na_values,
            'keep_default_na': self.keep_default_na,
            'na_filter': self.na_filter,
            'verbose': self.verbose,
            'skip_blank_lines': self.skip_blank_lines,
            'thousands': self.thousands,
            'decimal': self.decimal,
            'lineterminator': self.lineterminator,
            'quotechar': self.quotechar,
            'quoting': self.quoting,
            'escapechar': self.escapechar,
            'comment': self.comment,
            'encoding': self.encoding,
            'dialect': self.dialect,
            'error_bad_lines': self.error_bad_lines,
            'warn_bad_lines': self.warn_bad_lines,
            'skipfooter': self.skipfooter,
            'doublequote': self.doublequote,
            'memory_map': self.memory_map,
            'float_precision': self.float_precision,
            'chunksize': self.chunksize,
            'encoding_errors': self.encoding_errors,
            'on_bad_lines': self.on_bad_lines,
            'date_format': self.date_format,
        }

        # some kwargs must not be passed if they don't have a default value
        # some others are deprecated and we can also not pass them if they are the default value
        for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 2.0 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 2):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 1.3 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        return pd_read_csv_kwargs
class Csv(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles."""
        if not self.config.data_files:
            raise ValueError(f'At least one data file must be specified, but got data_files={self.config.data_files}')
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'files': files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={'files': files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                raise
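
# A sketch of what the kwarg filtering above does (the config values are made up):
# parameters still equal to their `CsvConfig` defaults, such as the deprecated
# `mangle_dupe_cols`, are deleted before the dict is handed to `pandas.read_csv`.
#
#     config = CsvConfig(name='demo', sep=';')
#     kwargs = config.pd_read_csv_kwargs
#     assert kwargs['sep'] == ';'
#     assert 'mangle_dupe_cols' not in kwargs  # equals the default, so it was dropped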
import torch
from transformers import AutoModel


class FSNERModel(torch.nn.Module):
    """Few-shot NER model: scores each query token as an entity start/end against support examples."""

    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super(FSNERModel, self).__init__()

        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        """Finds the score of each query token being the start and end token of an entity."""
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()

        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        # embed the queries and the (concatenated) support examples
        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id

        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
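
# An end-to-end sketch of how the forward pass above is meant to be driven. The [E]/[/E]
# entity markers, the tokenizer behaviour, and the exact preprocessing are assumptions
# inferred from the bookkeeping keys the forward pass consumes
# ('sizes', 'start_token_id', 'end_token_id'); treat this as illustrative only.
#
#     from transformers import AutoTokenizer
#
#     tokenizer = AutoTokenizer.from_pretrained('sayef/fsner-bert-base-uncased')
#     model = FSNERModel()
#
#     W_query = tokenizer(['KWE 4000 can reach a speed of 450 km/h'], return_tensors='pt')
#     W_supports = tokenizer(['[E] Leonardo da Vinci [/E] painted the Mona Lisa'], return_tensors='pt')
#     W_supports['sizes'] = torch.tensor([1])  # one support sentence for the single query
#     W_supports['start_token_id'] = torch.tensor(tokenizer.convert_tokens_to_ids('[E]'))
#     W_supports['end_token_id'] = torch.tensor(tokenizer.convert_tokens_to_ids('[/E]'))
#
#     p_starts, p_ends = model(W_query, W_supports)  # per-token start/end probabilities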
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def rename_keys(state_dict, encoder_only=False):
    """Maps original SegFormer checkpoint keys onto the HuggingFace naming scheme."""
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if encoder_only and not key.startswith('head'):
            key = 'segformer.encoder.' + key
        if key.startswith('backbone'):
            key = key.replace('backbone', 'segformer.encoder')
        if 'patch_embed' in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find('patch_embed') + len('patch_embed')]
            key = key.replace(f'patch_embed{idx}', f'patch_embeddings.{int(idx)-1}')
        if 'norm' in key:
            key = key.replace('norm', 'layer_norm')
        if 'segformer.encoder.layer_norm' in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find('segformer.encoder.layer_norm') + len('segformer.encoder.layer_norm')]
            key = key.replace(f'layer_norm{idx}', f'layer_norm.{int(idx)-1}')
        if 'layer_norm1' in key:
            key = key.replace('layer_norm1', 'layer_norm_1')
        if 'layer_norm2' in key:
            key = key.replace('layer_norm2', 'layer_norm_2')
        if 'block' in key:
            # replace for example block1 by block.0
            idx = key[key.find('block') + len('block')]
            key = key.replace(f'block{idx}', f'block.{int(idx)-1}')
        if 'attn.q' in key:
            key = key.replace('attn.q', 'attention.self.query')
        if 'attn.proj' in key:
            key = key.replace('attn.proj', 'attention.output.dense')
        if 'attn' in key:
            key = key.replace('attn', 'attention.self')
        if 'fc1' in key:
            key = key.replace('fc1', 'dense1')
        if 'fc2' in key:
            key = key.replace('fc2', 'dense2')
        if 'linear_pred' in key:
            key = key.replace('linear_pred', 'classifier')
        if 'linear_fuse' in key:
            key = key.replace('linear_fuse.conv', 'linear_fuse')
            key = key.replace('linear_fuse.bn', 'batch_norm')
        if 'linear_c' in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find('linear_c') + len('linear_c')]
            key = key.replace(f'linear_c{idx}', f'linear_c.{int(idx)-1}')
        if key.startswith('head'):
            key = key.replace('head', 'classifier')
        new_state_dict[key] = value

    return new_state_dict
def read_in_k_v(state_dict, config):
    """Splits the fused key/value projection of each block into separate key and value matrices."""
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f'segformer.encoder.block.{i}.{j}.attention.self.kv.weight')
            kv_bias = state_dict.pop(f'segformer.encoder.block.{i}.{j}.attention.self.kv.bias')
            # next, add keys and values (in that order) to the state dict
            state_dict[f'segformer.encoder.block.{i}.{j}.attention.self.key.weight'] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f'segformer.encoder.block.{i}.{j}.attention.self.key.bias'] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f'segformer.encoder.block.{i}.{j}.attention.self.value.weight'] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f'segformer.encoder.block.{i}.{j}.attention.self.value.bias'] = kv_bias[
                config.hidden_sizes[i] :
            ]


def prepare_img():
    """Downloads a COCO image of two cats on which the converted model will be verified."""
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_segformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    """Copies, renames, and verifies a checkpoint's weights against our SegFormer structure."""
    config = SegformerConfig()
    encoder_only = False

    # set attributes based on model_name
    repo_id = 'huggingface/label-files'
    if 'segformer' in model_name:
        size = model_name[len('segformer.') : len('segformer.') + 2]
        if 'ade' in model_name:
            config.num_labels = 150
            filename = 'ade20k-id2label.json'
            expected_shape = (1, 150, 128, 128)
        elif 'city' in model_name:
            config.num_labels = 19
            filename = 'cityscapes-id2label.json'
            expected_shape = (1, 19, 128, 128)
        else:
            raise ValueError(f'Model {model_name} not supported')
    elif 'mit' in model_name:
        encoder_only = True
        size = model_name[4:6]
        config.num_labels = 1000
        filename = 'imagenet-1k-id2label.json'
        expected_shape = (1, 1000)
    else:
        raise ValueError(f'Model {model_name} not supported')

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == 'b0':
        pass
    elif size == 'b1':
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 256
    elif size == 'b2':
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 4, 6, 3]
    elif size == 'b3':
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 4, 18, 3]
    elif size == 'b4':
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 8, 27, 3]
    elif size == 'b5':
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 6, 40, 3]
    else:
        raise ValueError(f'Size {size} not supported')

    # load image processor (only resize + normalize)
    image_processor = SegformerImageProcessor(
        image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
    )

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors='pt').pixel_values

    logger.info(f'Converting model {model_name}...')

    # load original state dict
    if encoder_only:
        state_dict = torch.load(checkpoint_path, map_location=torch.device('cpu'))
    else:
        state_dict = torch.load(checkpoint_path, map_location=torch.device('cpu'))['state_dict']

    # rename keys
    state_dict = rename_keys(state_dict, encoder_only=encoder_only)
    if not encoder_only:
        del state_dict['decode_head.conv_seg.weight']
        del state_dict['decode_head.conv_seg.bias']

    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)

    # create HuggingFace model and load state dict
    if encoder_only:
        config.reshape_last_stage = False
        model = SegformerForImageClassification(config)
    else:
        model = SegformerForSemanticSegmentation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits

    # set expected_slice based on model name
    # ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-4.6_310, -5.5_232, -6.2_356], [-5.1_921, -6.1_444, -6.5_996], [-5.4_424, -6.2_790, -6.7_574]],
[[-12.1_391, -13.3_122, -13.9_554], [-12.8_732, -13.9_352, -14.3_563], [-12.9_438, -13.8_226, -14.2_513]],
[[-12.5_134, -13.4_686, -14.4_915], [-12.8_669, -14.4_343, -14.7_758], [-13.2_523, -14.5_819, -15.0_694]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-7.5_820, -8.7_231, -8.3_215], [-8.0_600, -10.3_529, -10.0_304], [-7.5_208, -9.4_103, -9.6_239]],
[[-12.6_918, -13.8_994, -13.7_137], [-13.3_196, -15.7_523, -15.4_789], [-12.9_343, -14.8_757, -14.9_689]],
[[-11.1_911, -11.9_421, -11.3_243], [-11.3_342, -13.6_839, -13.3_581], [-10.3_909, -12.1_832, -12.4_858]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-11.8_173, -14.3_850, -16.3_128], [-14.5_648, -16.5_804, -18.6_568], [-14.7_223, -15.7_387, -18.4_218]],
[[-15.7_290, -17.9_171, -19.4_423], [-18.3_105, -19.9_448, -21.4_661], [-17.9_296, -18.6_497, -20.7_910]],
[[-15.0_783, -17.0_336, -18.2_789], [-16.8_771, -18.6_870, -20.1_612], [-16.2_454, -17.1_426, -19.5_055]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-9.0_878, -10.2_081, -10.1_891], [-9.3_144, -10.7_941, -10.9_843], [-9.2_294, -10.3_855, -10.5_704]],
[[-12.2_316, -13.9_068, -13.6_102], [-12.9_161, -14.3_702, -14.3_235], [-12.5_233, -13.7_174, -13.7_932]],
[[-14.6_275, -15.2_490, -14.9_727], [-14.3_400, -15.9_687, -16.2_827], [-14.1_484, -15.4_033, -15.8_937]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-12.3_144, -13.2_447, -14.0_802], [-13.3_614, -14.5_816, -15.6_117], [-13.3_340, -14.4_433, -16.2_219]],
[[-19.2_781, -20.4_128, -20.7_506], [-20.6_153, -21.6_566, -22.0_998], [-19.9_800, -21.0_430, -22.1_494]],
[[-18.8_739, -19.7_804, -21.1_834], [-20.1_233, -21.6_765, -23.2_944], [-20.0_315, -21.2_641, -23.6_944]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
        expected_slice = torch.tensor(
[
[[-9.5_524, -12.0_835, -11.7_348], [-10.5_229, -13.6_446, -14.5_662], [-9.5_842, -12.8_851, -13.9_414]],
[[-15.3_432, -17.5_323, -17.0_818], [-16.3_330, -18.9_255, -19.2_101], [-15.1_340, -17.7_848, -18.3_971]],
[[-12.6_072, -14.9_486, -14.6_631], [-13.7_629, -17.0_907, -17.7_745], [-12.7_899, -16.1_695, -17.1_671]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-11.9_295, -13.4_057, -14.8_106], [-13.3_431, -14.8_179, -15.3_781], [-14.2_836, -15.5_942, -16.1_588]],
[[-11.4_906, -12.8_067, -13.6_564], [-13.1_189, -14.0_500, -14.1_543], [-13.8_748, -14.5_136, -14.8_789]],
[[0.5_374, 0.1_067, -0.4_742], [0.1_141, -0.2_255, -0.7_099], [-0.3_000, -0.5_924, -1.3_105]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-7.8_217, -9.8_767, -10.1_717], [-9.4_438, -10.9_058, -11.4_047], [-9.7_939, -12.3_495, -12.1_079]],
[[-7.1_514, -9.5_336, -10.0_860], [-9.7_776, -11.6_822, -11.8_439], [-10.1_411, -12.7_655, -12.8_972]],
[[0.3_021, 0.0_805, -0.2_310], [-0.0_328, -0.1_605, -0.2_714], [-0.1_408, -0.5_477, -0.6_976]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
        expected_slice = torch.tensor(
[
[
[-1.1372e01, -1.2787e01, -1.3477e01],
[-1.2536e01, -1.4194e01, -1.4409e01],
[-1.3217e01, -1.4888e01, -1.5327e01],
],
[
[-1.4791e01, -1.7122e01, -1.8277e01],
[-1.7163e01, -1.9192e01, -1.9533e01],
[-1.7897e01, -1.9991e01, -2.0315e01],
],
[
[7.6723e-01, 4.1921e-01, -7.7878e-02],
[4.7772e-01, 9.5557e-03, -2.8082e-01],
[3.6032e-01, -2.4826e-01, -5.1168e-01],
],
] )
elif model_name == "segformer.b0.768x768.city.160k":
        expected_slice = torch.tensor(
[
[[-9.4_959, -11.3_087, -11.7_479], [-11.0_025, -12.6_540, -12.3_319], [-11.4_064, -13.0_487, -12.9_905]],
[[-9.8_905, -11.3_084, -12.0_854], [-11.1_726, -12.7_698, -12.9_583], [-11.5_985, -13.3_278, -14.1_774]],
[[0.2_213, 0.0_192, -0.2_466], [-0.1_731, -0.4_213, -0.4_874], [-0.3_126, -0.6_541, -1.1_389]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-13.5_748, -13.9_111, -12.6_500], [-14.3_500, -15.3_683, -14.2_328], [-14.7_532, -16.0_424, -15.6_087]],
[[-17.1_651, -15.8_725, -12.9_653], [-17.2_580, -17.3_718, -14.8_223], [-16.6_058, -16.8_783, -16.7_452]],
[[-3.6_456, -3.0_209, -1.4_203], [-3.0_797, -3.1_959, -2.0_000], [-1.8_757, -1.9_217, -1.6_997]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-16.0_976, -16.4_856, -17.3_962], [-16.6_234, -19.0_342, -19.7_685], [-16.0_900, -18.0_661, -19.1_180]],
[[-18.4_750, -18.8_488, -19.5_074], [-19.4_030, -22.1_570, -22.5_977], [-19.1_191, -20.8_486, -22.3_783]],
[[-4.5_178, -5.5_037, -6.5_109], [-5.0_884, -7.2_174, -8.0_334], [-4.4_156, -5.8_117, -7.2_970]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-14.2_081, -14.4_732, -14.1_977], [-14.5_867, -16.4_423, -16.6_356], [-13.4_441, -14.9_685, -16.8_696]],
[[-14.4_576, -14.7_073, -15.0_451], [-15.0_816, -17.6_237, -17.9_873], [-14.4_213, -16.0_199, -18.5_992]],
[[-4.7_349, -4.9_588, -5.0_966], [-4.3_210, -6.9_325, -7.2_591], [-3.4_312, -4.7_484, -7.1_917]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-11.7_737, -11.9_526, -11.3_273], [-13.6_692, -14.4_574, -13.8_878], [-13.8_937, -14.6_924, -15.9_345]],
[[-14.6_706, -14.5_330, -14.1_306], [-16.1_502, -16.8_180, -16.4_269], [-16.8_338, -17.8_939, -20.1_746]],
[[1.0_491, 0.8_289, 1.0_310], [1.1_044, 0.5_219, 0.8_055], [1.0_899, 0.6_926, 0.5_590]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-12.5_641, -13.4_777, -13.0_684], [-13.9_587, -15.8_983, -16.6_557], [-13.3_109, -15.7_350, -16.3_141]],
[[-14.7_074, -15.4_352, -14.5_944], [-16.6_353, -18.1_663, -18.6_120], [-15.1_702, -18.0_329, -18.1_547]],
[[-1.7_990, -2.0_951, -1.7_784], [-2.6_397, -3.8_245, -3.9_686], [-1.5_264, -2.8_126, -2.9_316]],
] )
    else:
        predicted_class_idx = logits.argmax(-1).item()
        print('Predicted class:', model.config.id2label[predicted_class_idx])

    # verify logits
    if not encoder_only:
        assert logits.shape == expected_shape
        assert torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...')
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""segformer.b0.512x512.ade.160k""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
__snake_case = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
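
# Example invocation of the script above (the script file name and the paths are placeholders):
#
#     python convert_segformer_original_to_pytorch.py \
#         --model_name segformer.b0.512x512.ade.160k \
#         --checkpoint_path /path/to/segformer.b0.512x512.ade.160k.pth \
#         --pytorch_dump_folder_path ./segformer-b0-ade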
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class lowercase__ :
A__ : CommonSchedulerState
# setable values
A__ : jnp.ndarray
A__ : jnp.ndarray
A__ : Optional[int] =None
@classmethod
def A_ ( cls : List[str] , UpperCAmelCase_ : CommonSchedulerState , UpperCAmelCase_ : jnp.ndarray , UpperCAmelCase_ : jnp.ndarray ):
return cls(common=UpperCAmelCase_ , init_noise_sigma=UpperCAmelCase_ , timesteps=UpperCAmelCase_ )
@dataclass
class lowercase__ ( _UpperCAmelCase ):
A__ : DDPMSchedulerState
class lowercase__ ( _UpperCAmelCase , _UpperCAmelCase ):
A__ : List[Any] =[e.name for e in FlaxKarrasDiffusionSchedulers]
A__ : jnp.dtype
@property
def A_ ( self : List[Any] ):
return True
@register_to_config
def __init__( self : Optional[int] , UpperCAmelCase_ : int = 1000 , UpperCAmelCase_ : float = 0.0_001 , UpperCAmelCase_ : float = 0.02 , UpperCAmelCase_ : str = "linear" , UpperCAmelCase_ : Optional[jnp.ndarray] = None , UpperCAmelCase_ : str = "fixed_small" , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : str = "epsilon" , UpperCAmelCase_ : jnp.dtype = jnp.floataa , ):
SCREAMING_SNAKE_CASE__ = dtype
def A_ ( self : str , UpperCAmelCase_ : Optional[CommonSchedulerState] = None ):
if common is None:
SCREAMING_SNAKE_CASE__ = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
SCREAMING_SNAKE_CASE__ = jnp.array(1.0 , dtype=self.dtype )
SCREAMING_SNAKE_CASE__ = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
        return DDPMSchedulerState.create(
            common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps,
        )

    def scale_model_input(
        self, state: DDPMSchedulerState, sample: jnp.ndarray, timestep: Optional[int] = None
    ) -> jnp.ndarray:
        # The DDPM scheduler does not rescale model inputs.
        return sample

    def set_timesteps(
        self, state: DDPMSchedulerState, num_inference_steps: int, shape: Tuple = ()
    ) -> DDPMSchedulerState:
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_steps is a power of 3
        timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round()[::-1]
        return state.replace(
            num_inference_steps=num_inference_steps, timesteps=timesteps,
        )

    def _get_variance(self, state: DDPMSchedulerState, t, predicted_variance=None, variance_type=None):
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            variance = jnp.clip(variance, a_min=1e-20)
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = jnp.log(jnp.clip(variance, a_min=1e-20))
        elif variance_type == "fixed_large":
            variance = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = jnp.log(state.common.betas[t])
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            min_log = variance
            max_log = state.common.betas[t]
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance

    def step(
        self,
        state: DDPMSchedulerState,
        model_output: jnp.ndarray,
        timestep: int,
        sample: jnp.ndarray,
        key: Optional[jax.random.KeyArray] = None,
        return_dict: bool = True,
    ):
        t = timestep

        if key is None:
            key = jax.random.PRNGKey(0)

        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            model_output, predicted_variance = jnp.split(model_output, sample.shape[1], axis=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` "
                "or `v_prediction` for the FlaxDDPMScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = jnp.clip(pred_original_sample, -1, 1)

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        current_sample_coeff = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        def random_variance():
            split_key = jax.random.split(key, num=1)
            noise = jax.random.normal(split_key, shape=model_output.shape, dtype=self.dtype)
            return (self._get_variance(state, t, predicted_variance=predicted_variance) ** 0.5) * noise

        variance = jnp.where(t > 0, random_variance(), jnp.zeros(model_output.shape, dtype=self.dtype))

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample, state)

        return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample, state=state)

    def add_noise(
        self,
        state: DDPMSchedulerState,
        original_samples: jnp.ndarray,
        noise: jnp.ndarray,
        timesteps: jnp.ndarray,
    ) -> jnp.ndarray:
        return add_noise_common(state.common, original_samples, noise, timesteps)

    def get_velocity(
        self,
        state: DDPMSchedulerState,
        sample: jnp.ndarray,
        noise: jnp.ndarray,
        timesteps: jnp.ndarray,
    ) -> jnp.ndarray:
        return get_velocity_common(state.common, sample, noise, timesteps)
def __len__( self : Optional[int] ):
return self.config.num_train_timesteps
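# Usage sketch (added note, not part of the original file): how a caller might
# drive the set_timesteps/step loop above. `FlaxDDPMScheduler` is assumed to be
# the class this fragment belongs to, and `model_fn(sample, t) -> jnp.ndarray`
# stands for any noise-prediction model; both names are illustrative.
#
# import jax
# import jax.numpy as jnp
#
# scheduler = FlaxDDPMScheduler(num_train_timesteps=1000)
# state = scheduler.create_state()
# state = scheduler.set_timesteps(state, num_inference_steps=50, shape=(1, 3, 32, 32))
# key = jax.random.PRNGKey(0)
# sample = jax.random.normal(key, (1, 3, 32, 32))
# for t in state.timesteps:
#     key, step_key = jax.random.split(key)
#     model_output = model_fn(sample, t)  # predicted noise (epsilon)
#     sample, state = scheduler.step(state, model_output, t, sample, key=step_key, return_dict=False)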
import json
import os
import re
import sys
import urllib.request
import requests
from bs4 import BeautifulSoup

headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
    " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
}


def download_images_from_google_query(query: str = "dhaka", max_images: int = 5) -> int:
    """Search Google Images for `query` and save up to `max_images` full-resolution
    images into a per-query directory; returns the number of images handled."""
    max_images = min(max_images, 50)  # Prevent abuse!
    params = {
        "q": query,
        "tbm": "isch",
        "hl": "en",
        "ijn": "0",
    }
    html = requests.get("https://www.google.com/search", params=params, headers=headers)
    soup = BeautifulSoup(html.text, "html.parser")
    matched_images_data = "".join(
        re.findall(r"AF_initDataCallback\(([^<]+)\);", str(soup.select("script"))))
    matched_images_data_fix = json.dumps(matched_images_data)
    matched_images_data_json = json.loads(matched_images_data_fix)
    matched_google_image_data = re.findall(
        r"\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",",
        matched_images_data_json,
    )
    if not matched_google_image_data:
        return 0
    removed_matched_google_images_thumbnails = re.sub(
        r"\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]",
        "",
        str(matched_google_image_data),
    )
    matched_google_full_resolution_images = re.findall(
        r"(?:'|,),\[\"(https:|http.*?)\",\d+,\d+\]",
        removed_matched_google_images_thumbnails,
    )
    for index, fixed_full_res_image in enumerate(matched_google_full_resolution_images):
        if index >= max_images:
            return index
        # The URLs in Google's payload are escaped twice, so decode twice.
        original_size_img_not_fixed = bytes(fixed_full_res_image, "ascii").decode(
            "unicode-escape")
        original_size_img = bytes(original_size_img_not_fixed, "ascii").decode(
            "unicode-escape")
        opener = urllib.request.build_opener()
        opener.addheaders = [
            (
                "User-Agent",
                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
                " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582",
            )
        ]
        urllib.request.install_opener(opener)
        path_name = f"query_{query.replace(' ', '_')}"
        if not os.path.exists(path_name):
            os.makedirs(path_name)
        urllib.request.urlretrieve(  # noqa: S310
            original_size_img, f"{path_name}/original_size_img_{index}.jpg")
    return index
if __name__ == "__main__":
try:
UpperCamelCase_ = download_images_from_google_query(sys.argv[1])
print(f'''{image_count} images were downloaded to disk.''')
except IndexError:
print("Please provide a search term.")
raise
import numpy as np
import qiskit
def bb84(key_len: int = 8, seed: int | None = None) -> str:
    """Simulate the BB84 protocol and return a key of `key_len` bits."""
    rng = np.random.default_rng(seed=seed)
    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)

    # Quantum Circuit to simulate BB84
    bb84_circ = qiskit.QuantumCircuit(num_qubits, name="BB84")

    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_basis):
        if alice_state[index] == 1:
            bb84_circ.x(index)
        if alice_basis[index] == 1:
            bb84_circ.h(index)
    bb84_circ.barrier()

    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bb84_circ.h(index)

    bb84_circ.barrier()
    bb84_circ.measure_all()

    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("aer_simulator")
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bb84_circ, sim, shots=1, seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bb84_circ).most_frequent()

    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = "".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(
                alice_basis, bob_basis, result)
            if alice_basis_bit == bob_basis_bit
        ])

    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, "0")
    return key


if __name__ == "__main__":
    print(f"The generated key is : {bb84(8, seed=0)}")
from doctest import testmod
testmod()
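# Added illustration (not part of the original file): the key-agreement
# ("sifting") step above keeps only the measurement bits where Alice's and
# Bob's bases match. The same logic in plain Python, with made-up data:
#
# alice_basis = [0, 1, 1, 0]
# bob_basis   = [0, 0, 1, 1]
# result_bits = "1011"
# sifted = "".join(
#     bit for a, b, bit in zip(alice_basis, bob_basis, result_bits) if a == b
# )
# assert sifted == "11"  # positions 0 and 2 agree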
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
UpperCAmelCase = logging.getLogger(__name__)
def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
@dataclass
class DataTrainingArguments:
    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)
# Set seed
set_seed(training_args.seed )
try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
except KeyError:
raise ValueError('Task not found: %s' % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )
# Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

            results.update(result)
return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
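# Example invocation (illustrative, not from the source; the script name, task
# name, and data paths below are assumptions):
#
#   python run_multiple_choice.py \
#       --task_name swag \
#       --model_name_or_path bert-base-uncased \
#       --data_dir ./data/swag \
#       --output_dir ./swag_output \
#       --max_seq_length 128 \
#       --do_train --do_eval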
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    # Minimal stand-in so the module imports without PIL installed.
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class ObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = ObjectDetectionPipeline(model=model, image_processor=processor)
        return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]

    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png", threshold=0.0)

        self.assertGreater(len(outputs), 0)
        for detected_object in outputs:
            self.assertEqual(
                detected_object,
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                },
            )
        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")

        batch = [
            Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            # RGBA
            dataset[0]["file"],
            # LA
            dataset[1]["file"],
            # L
            dataset[2]["file"],
        ]
        batch_outputs = object_detector(batch, threshold=0.0)

        self.assertEqual(len(batch), len(batch_outputs))
        for outputs in batch_outputs:
            self.assertGreater(len(outputs), 0)
            for detected_object in outputs:
                self.assertEqual(
                    detected_object,
                    {
                        "score": ANY(float),
                        "label": ANY(str),
                        "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                    },
                )
    @require_tf
    @unittest.skip("Object detection not implemented in TF")
    def test_small_model_tf(self):
        pass

    @require_torch
    def test_small_model_pt(self):
        model_id = "hf-internal-testing/tiny-detr-mobilenetsv3"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.0)

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 1_5_9, '''ymin''': 1_2_0, '''xmax''': 4_8_0, '''ymax''': 3_5_9}},
{'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 1_5_9, '''ymin''': 1_2_0, '''xmax''': 4_8_0, '''ymax''': 3_5_9}},
] , )
        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ],
            threshold=0.0,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
[
{'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 1_5_9, '''ymin''': 1_2_0, '''xmax''': 4_8_0, '''ymax''': 3_5_9}},
{'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 1_5_9, '''ymin''': 1_2_0, '''xmax''': 4_8_0, '''ymax''': 3_5_9}},
],
[
{'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 1_5_9, '''ymin''': 1_2_0, '''xmax''': 4_8_0, '''ymax''': 3_5_9}},
{'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 1_5_9, '''ymin''': 1_2_0, '''xmax''': 4_8_0, '''ymax''': 3_5_9}},
],
] , )
    @require_torch
    @slow
    def test_large_model_pt(self):
        model_id = "facebook/detr-resnet-50"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_0, '''xmax''': 1_7_5, '''ymax''': 1_1_7}},
{'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_3, '''ymin''': 7_2, '''xmax''': 3_6_8, '''ymax''': 1_8_7}},
{'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 6_3_9, '''ymax''': 4_7_3}},
{'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 1_3, '''ymin''': 5_2, '''xmax''': 3_1_4, '''ymax''': 4_7_0}},
{'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 3_4_5, '''ymin''': 2_3, '''xmax''': 6_4_0, '''ymax''': 3_6_8}},
] , )
        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ]
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
[
{'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_0, '''xmax''': 1_7_5, '''ymax''': 1_1_7}},
{'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_3, '''ymin''': 7_2, '''xmax''': 3_6_8, '''ymax''': 1_8_7}},
{'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 6_3_9, '''ymax''': 4_7_3}},
{'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 1_3, '''ymin''': 5_2, '''xmax''': 3_1_4, '''ymax''': 4_7_0}},
{'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 3_4_5, '''ymin''': 2_3, '''xmax''': 6_4_0, '''ymax''': 3_6_8}},
],
[
{'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_0, '''xmax''': 1_7_5, '''ymax''': 1_1_7}},
{'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_3, '''ymin''': 7_2, '''xmax''': 3_6_8, '''ymax''': 1_8_7}},
{'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 6_3_9, '''ymax''': 4_7_3}},
{'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 1_3, '''ymin''': 5_2, '''xmax''': 3_1_4, '''ymax''': 4_7_0}},
{'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 3_4_5, '''ymin''': 2_3, '''xmax''': 6_4_0, '''ymax''': 3_6_8}},
],
] , )
    @require_torch
    @slow
    def test_integration_torch_object_detection(self):
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection", model=model_id)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_0, '''xmax''': 1_7_5, '''ymax''': 1_1_7}},
{'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_3, '''ymin''': 7_2, '''xmax''': 3_6_8, '''ymax''': 1_8_7}},
{'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 6_3_9, '''ymax''': 4_7_3}},
{'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 1_3, '''ymin''': 5_2, '''xmax''': 3_1_4, '''ymax''': 4_7_0}},
{'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 3_4_5, '''ymin''': 2_3, '''xmax''': 6_4_0, '''ymax''': 3_6_8}},
] , )
        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ]
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
[
{'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_0, '''xmax''': 1_7_5, '''ymax''': 1_1_7}},
{'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_3, '''ymin''': 7_2, '''xmax''': 3_6_8, '''ymax''': 1_8_7}},
{'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 6_3_9, '''ymax''': 4_7_3}},
{'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 1_3, '''ymin''': 5_2, '''xmax''': 3_1_4, '''ymax''': 4_7_0}},
{'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 3_4_5, '''ymin''': 2_3, '''xmax''': 6_4_0, '''ymax''': 3_6_8}},
],
[
{'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_0, '''xmax''': 1_7_5, '''ymax''': 1_1_7}},
{'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_3, '''ymin''': 7_2, '''xmax''': 3_6_8, '''ymax''': 1_8_7}},
{'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 6_3_9, '''ymax''': 4_7_3}},
{'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 1_3, '''ymin''': 5_2, '''xmax''': 3_1_4, '''ymax''': 4_7_0}},
{'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 3_4_5, '''ymin''': 2_3, '''xmax''': 6_4_0, '''ymax''': 3_6_8}},
],
] , )
    @require_torch
    @slow
    def test_threshold(self):
        threshold = 0.9985
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection", model=model_id)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=threshold)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 1_3, '''ymin''': 5_2, '''xmax''': 3_1_4, '''ymax''': 4_7_0}},
{'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 3_4_5, '''ymin''': 2_3, '''xmax''': 6_4_0, '''ymax''': 3_6_8}},
] , )
    @require_torch
    @require_pytesseract
    @slow
    def test_layoutlm(self):
        model_id = "Narsil/layoutlmv3-finetuned-funsd"
        threshold = 0.9993

        object_detector = pipeline("object-detection", model=model_id, threshold=threshold)

        outputs = object_detector(
            "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png"
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{'''score''': 0.9993, '''label''': '''I-ANSWER''', '''box''': {'''xmin''': 2_9_4, '''ymin''': 2_5_4, '''xmax''': 3_4_3, '''ymax''': 2_6_4}},
{'''score''': 0.9993, '''label''': '''I-ANSWER''', '''box''': {'''xmin''': 2_9_4, '''ymin''': 2_5_4, '''xmax''': 3_4_3, '''ymax''': 2_6_4}},
] , )
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class RagRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8

        # DPR tok
        vocab_tokens = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        # BART tok
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_dpr_ctx_encoder_tokenizer(self) -> DPRContextEncoderTokenizer:
        return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
{
'id': ['0', '1'],
'text': ['foo', 'bar'],
'title': ['Foo', 'Bar'],
'embeddings': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index('embeddings' , string_factory='Flat' , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
    def get_dummy_canonical_hf_index_retriever(self):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
        )
        with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
            mock_load_dataset.return_value = dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        return retriever
    def get_dummy_custom_hf_index_retriever(self, from_disk: bool):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="custom",
        )
        if from_disk:
            config.passages_path = os.path.join(self.tmpdirname, "dataset")
            config.index_path = os.path.join(self.tmpdirname, "index.faiss")
            dataset.get_index("embeddings").save(os.path.join(self.tmpdirname, "index.faiss"))
            dataset.drop_index("embeddings")
            dataset.save_to_disk(os.path.join(self.tmpdirname, "dataset"))
            del dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        else:
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
                index=CustomHFIndex(config.retrieval_vector_size, dataset),
            )
        return retriever
    def get_dummy_legacy_index_retriever(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size + 1), 2 * np.ones(self.retrieval_vector_size + 1)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)

        index_file_name = os.path.join(self.tmpdirname, "hf_bert_base.hnswSQ8_correct_phi_128.c_index")
        dataset.save_faiss_index("embeddings", index_file_name + ".index.dpr")
        pickle.dump(dataset["id"], open(index_file_name + ".index_meta.dpr", "wb"))

        passages_file_name = os.path.join(self.tmpdirname, "psgs_w100.tsv.pkl")
        passages = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
        pickle.dump(passages, open(passages_file_name, "wb"))

        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="legacy",
            index_path=self.tmpdirname,
        )
        retriever = RagRetriever(
            config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer()
        )
        return retriever
    def test_canonical_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_canonical_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_canonical_hf_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
                mock_load_dataset.return_value = self.get_dummy_dataset()
                retriever.save_pretrained(tmp_dirname)
                retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)

        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)
    def test_custom_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_custom_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)

        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)
    def test_custom_hf_index_retriever_retrieve_from_disk(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_custom_hf_index_retriever_save_and_from_pretrained_from_disk(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)

        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)
    def test_legacy_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_legacy_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["text", "title"])
        self.assertEqual(len(doc_dicts[0]["text"]), n_docs)
        self.assertEqual(doc_dicts[0]["text"][0], "bar")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["text"][0], "foo")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_legacy_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_legacy_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)

        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)
    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def test_hf_index_retriever_call(self):
        import torch

        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)
        context_input_ids, context_attention_mask, retrieved_doc_embeds = (
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, list)
        self.assertIsInstance(context_attention_mask, list)
        self.assertIsInstance(retrieved_doc_embeds, np.ndarray)

        out = retriever(
            question_input_ids,
            hidden_states,
            prefix=retriever.config.generator.prefix,
            n_docs=n_docs,
            return_tensors="pt",
        )
        context_input_ids, context_attention_mask, retrieved_doc_embeds, doc_ids = (  # noqa: F841
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
            out["doc_ids"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, torch.Tensor)
        self.assertIsInstance(context_attention_mask, torch.Tensor)
        self.assertIsInstance(retrieved_doc_embeds, torch.Tensor)
    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def test_custom_hf_index_end2end_retriever_call(self):
        context_encoder_tokenizer = self.get_dpr_ctx_encoder_tokenizer()
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        retriever.set_ctx_encoder_tokenizer(context_encoder_tokenizer)

        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)

        self.assertEqual(
            len(out), 6
        )  # check whether the retriever output consist of 6 attributes including tokenized docs
        self.assertEqual(
            all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask")), True
        )  # check for doc token related keys in dictionary.
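# Added sketch (not in the original tests): the shape expectations checked
# above, restated as a minimal retrieve() call. `retriever` stands for any
# RagRetriever built like the helpers in this class; names are illustrative.
#
# import numpy as np
#
# hidden_states = np.ones((2, retriever.config.retrieval_vector_size), dtype=np.float32)
# retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=1)
# assert retrieved_doc_embeds.shape == (2, 1, retriever.config.retrieval_vector_size)
# assert len(doc_dicts) == 2  # one dict of retrieved docs per query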
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    return sum(
        number
        for number in range(1000, 1000000)
        if number == digits_fifth_powers_sum(number))
if __name__ == "__main__":
print(solution())
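# Worked check (added): 4150 is one of the numbers counted by solution(),
# since 4**5 + 1**5 + 5**5 + 0**5 = 1024 + 1 + 3125 + 0 = 4150.
# assert digits_fifth_powers_sum(4150) == 4150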
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import Seq2SeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, T5ForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
    LegacySeq2SeqDataset,
    Seq2SeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
_UpperCamelCase = logging.getLogger(__name__)
class SummarizationModule(BaseTransformer):
    mode = "summarization"
    loss_names = ["loss"]
    metric_names = ROUGE_KEYS
    default_val_metric = "rouge2"
    def __init__(self, hparams, **kwargs):
        if hparams.sortish_sampler and hparams.gpus > 1:
            hparams.replace_sampler_ddp = False
        elif hparams.max_tokens_per_batch is not None:
            if hparams.gpus > 1:
                raise NotImplementedError("Dynamic Batch size does not work for multi-gpu training")
            if hparams.sortish_sampler:
                raise ValueError("--sortish_sampler and --max_tokens_per_batch may not be used simultaneously")

        super().__init__(hparams, num_labels=None, mode=self.mode, **kwargs)
        use_task_specific_params(self.model, "summarization")
        save_git_info(self.hparams.output_dir)
        self.metrics_save_path = Path(self.output_dir) / "metrics.json"
        self.hparams_save_path = Path(self.output_dir) / "hparams.pkl"
        pickle_save(self.hparams, self.hparams_save_path)
        self.step_count = 0
        self.metrics = defaultdict(list)
        self.model_type = self.config.model_type
        self.vocab_size = self.config.tgt_vocab_size if self.model_type == "fsmt" else self.config.vocab_size

        self.dataset_kwargs: dict = {
            "data_dir": self.hparams.data_dir,
            "max_source_length": self.hparams.max_source_length,
            "prefix": self.model.config.prefix or "",
        }
        n_observations_per_split = {
            "train": self.hparams.n_train,
            "val": self.hparams.n_val,
            "test": self.hparams.n_test,
        }
        self.n_obs = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}

        self.target_lens = {
            "train": self.hparams.max_target_length,
            "val": self.hparams.val_max_target_length,
            "test": self.hparams.test_max_target_length,
        }
        assert self.target_lens["train"] <= self.target_lens["val"], f"target_lens: {self.target_lens}"
        assert self.target_lens["train"] <= self.target_lens["test"], f"target_lens: {self.target_lens}"
        if self.hparams.freeze_embeds:
            freeze_embeds(self.model)
        if self.hparams.freeze_encoder:
            freeze_params(self.model.get_encoder())
            assert_all_frozen(self.model.get_encoder())

        self.hparams.git_sha = get_git_info()["repo_sha"]
        self.num_workers = hparams.num_workers
        self.decoder_start_token_id = None  # default to config
        if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer, MBartTokenizer):
            self.decoder_start_token_id = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
            self.model.config.decoder_start_token_id = self.decoder_start_token_id
        self.dataset_class = (
            Seq2SeqDataset if hasattr(self.tokenizer, "prepare_seq2seq_batch") else LegacySeq2SeqDataset
        )
        self.already_saved_batch = False
        self.eval_beams = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
        if self.hparams.eval_max_gen_length is not None:
            self.eval_max_length = self.hparams.eval_max_gen_length
        else:
            self.eval_max_length = self.model.config.max_length
        self.val_metric = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
    def save_readable_batch(self, batch: Dict[str, torch.Tensor]) -> Dict[str, List[str]]:
        """A debugging utility."""
        readable_batch = {
            k: self.tokenizer.batch_decode(v.tolist()) if "mask" not in k else v.shape for k, v in batch.items()
        }
        save_json(readable_batch, Path(self.output_dir) / "text_batch.json")
        save_json({k: v.tolist() for k, v in batch.items()}, Path(self.output_dir) / "tok_batch.json")

        self.already_saved_batch = True
        return readable_batch

    def forward(self, input_ids, **kwargs):
        return self.model(input_ids, **kwargs)

    def ids_to_clean_text(self, generated_ids):
        gen_text = self.tokenizer.batch_decode(
            generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True
        )
        return lmap(str.strip, gen_text)
    def _step(self, batch: dict) -> Tuple:
        pad_token_id = self.tokenizer.pad_token_id
        src_ids, src_mask = batch["input_ids"], batch["attention_mask"]
        tgt_ids = batch["labels"]
        if isinstance(self.model, T5ForConditionalGeneration):
            decoder_input_ids = self.model._shift_right(tgt_ids)
        else:
            decoder_input_ids = shift_tokens_right(tgt_ids, pad_token_id)
        if not self.already_saved_batch:  # This would be slightly better if it only happened on rank zero
            batch["decoder_input_ids"] = decoder_input_ids
            self.save_readable_batch(batch)

        outputs = self(src_ids, attention_mask=src_mask, decoder_input_ids=decoder_input_ids, use_cache=False)
        lm_logits = outputs["logits"]
        if self.hparams.label_smoothing == 0:
            # Same behavior as modeling_bart.py, besides ignoring pad_token_id
            ce_loss_fct = nn.CrossEntropyLoss(ignore_index=pad_token_id)

            assert lm_logits.shape[-1] == self.vocab_size
            loss = ce_loss_fct(lm_logits.view(-1, lm_logits.shape[-1]), tgt_ids.view(-1))
        else:
            lprobs = nn.functional.log_softmax(lm_logits, dim=-1)
            loss, nll_loss = label_smoothed_nll_loss(
                lprobs, tgt_ids, self.hparams.label_smoothing, ignore_index=pad_token_id
            )
        return (loss,)
    @property
    def pad(self) -> int:
        return self.tokenizer.pad_token_id

    def training_step(self, batch, batch_idx) -> Dict:
        loss_tensors = self._step(batch)

        logs = dict(zip(self.loss_names, loss_tensors))
        # tokens per batch
        logs["tpb"] = batch["input_ids"].ne(self.pad).sum() + batch["labels"].ne(self.pad).sum()
        logs["bs"] = batch["input_ids"].shape[0]
        logs["src_pad_tok"] = batch["input_ids"].eq(self.pad).sum()
        logs["src_pad_frac"] = batch["input_ids"].eq(self.pad).float().mean()
        # TODO(SS): make a wandb summary metric for this
        return {"loss": loss_tensors[0], "log": logs}
    def validation_step(self, batch, batch_idx) -> Dict:
        return self._generative_step(batch)

    def validation_epoch_end(self, outputs, prefix="val") -> Dict:
        self.step_count += 1
        losses = {k: torch.stack([x[k] for x in outputs]).mean() for k in self.loss_names}
        loss = losses["loss"]
        generative_metrics = {
            k: np.array([x[k] for x in outputs]).mean() for k in self.metric_names + ["gen_time", "gen_len"]
        }
        metric_val = (
            generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
        )
        metric_tensor = torch.tensor(metric_val).type_as(loss)
        generative_metrics.update({k: v.item() for k, v in losses.items()})
        losses.update(generative_metrics)
        all_metrics = {f"{prefix}_avg_{k}": x for k, x in losses.items()}
        all_metrics["step_count"] = self.step_count
        self.metrics[prefix].append(all_metrics)  # callback writes this to self.metrics_save_path
        preds = flatten_list([x["preds"] for x in outputs])
        return {
            "log": all_metrics,
            "preds": preds,
            f"{prefix}_loss": loss,
            f"{prefix}_{self.val_metric}": metric_tensor,
        }
    def calc_generative_metrics(self, preds, target) -> Dict:
        return calculate_rouge(preds, target)

    def _generative_step(self, batch: dict) -> dict:
        t0 = time.time()

        # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
        generated_ids = self.model.generate(
            batch["input_ids"],
            attention_mask=batch["attention_mask"],
            use_cache=True,
            decoder_start_token_id=self.decoder_start_token_id,
            num_beams=self.eval_beams,
            max_length=self.eval_max_length,
        )
        gen_time = (time.time() - t0) / batch["input_ids"].shape[0]
        preds = self.ids_to_clean_text(generated_ids)
        target = self.ids_to_clean_text(batch["labels"])
        loss_tensors = self._step(batch)
        base_metrics = dict(zip(self.loss_names, loss_tensors))
        rouge = self.calc_generative_metrics(preds, target)
        summ_len = np.mean(lmap(len, preds))
        base_metrics.update(gen_time=gen_time, gen_len=summ_len, preds=preds, target=target, **rouge)
        return base_metrics

    def test_step(self, batch, batch_idx):
        return self._generative_step(batch)

    def test_epoch_end(self, outputs):
        return self.validation_epoch_end(outputs, prefix="test")
    def get_dataset(self, type_path) -> Seq2SeqDataset:
        n_obs = self.n_obs[type_path]
        max_target_length = self.target_lens[type_path]
        dataset = self.dataset_class(
            self.tokenizer,
            type_path=type_path,
            n_obs=n_obs,
            max_target_length=max_target_length,
            **self.dataset_kwargs,
        )
        return dataset

    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        dataset = self.get_dataset(type_path)

        if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
            sampler = dataset.make_sortish_sampler(batch_size, distributed=self.hparams.gpus > 1)
            return DataLoader(
                dataset,
                batch_size=batch_size,
                collate_fn=dataset.collate_fn,
                shuffle=False,
                num_workers=self.num_workers,
                sampler=sampler,
            )
        elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
            batch_sampler = dataset.make_dynamic_sampler(
                self.hparams.max_tokens_per_batch, distributed=self.hparams.gpus > 1
            )
            return DataLoader(
                dataset,
                batch_sampler=batch_sampler,
                collate_fn=dataset.collate_fn,
                num_workers=self.num_workers,
            )
        else:
            return DataLoader(
                dataset,
                batch_size=batch_size,
                collate_fn=dataset.collate_fn,
                shuffle=shuffle,
                num_workers=self.num_workers,
                sampler=None,
            )

    def train_dataloader(self) -> DataLoader:
        dataloader = self.get_dataloader("train", batch_size=self.hparams.train_batch_size, shuffle=True)
        return dataloader

    def val_dataloader(self) -> DataLoader:
        return self.get_dataloader("val", batch_size=self.hparams.eval_batch_size)

    def test_dataloader(self) -> DataLoader:
        return self.get_dataloader("test", batch_size=self.hparams.eval_batch_size)
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        add_generic_args(parser, root_dir)
        parser.add_argument(
            "--max_source_length",
            default=1024,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--max_target_length",
            default=56,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--val_max_target_length",
            default=142,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--test_max_target_length",
            default=142,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument("--freeze_encoder", action="store_true")
        parser.add_argument("--freeze_embeds", action="store_true")
        parser.add_argument("--sortish_sampler", action="store_true", default=False)
        parser.add_argument("--overwrite_output_dir", action="store_true", default=False)
        parser.add_argument("--max_tokens_per_batch", type=int, default=None)
        parser.add_argument("--logger_name", type=str, choices=["default", "wandb", "wandb_shared"], default="default")
        parser.add_argument("--n_train", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_val", type=int, default=500, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_test", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument(
            "--task", type=str, default="summarization", required=False, help="# examples. -1 means use all."
        )
        parser.add_argument("--label_smoothing", type=float, default=0.0, required=False)
        parser.add_argument("--src_lang", type=str, default="", required=False)
        parser.add_argument("--tgt_lang", type=str, default="", required=False)
        parser.add_argument("--eval_beams", type=int, default=None, required=False)
        parser.add_argument(
            "--val_metric", type=str, default=None, required=False, choices=["bleu", "rouge2", "loss", None]
        )
        parser.add_argument("--eval_max_gen_length", type=int, default=None, help="never generate more than n tokens")
        parser.add_argument("--save_top_k", type=int, default=1, required=False, help="How many checkpoints to save")
        parser.add_argument(
            "--early_stopping_patience",
            type=int,
            default=-1,
            required=False,
            help=(
                "-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"
                " val_check_interval will effect it."
            ),
        )
        return parser
class TranslationModule(SummarizationModule):
    mode = "translation"
    loss_names = ["loss"]
    metric_names = ["bleu"]
    default_val_metric = "bleu"

    def __init__(self, hparams, **kwargs):
        super().__init__(hparams, **kwargs)
        self.dataset_kwargs["src_lang"] = hparams.src_lang
        self.dataset_kwargs["tgt_lang"] = hparams.tgt_lang

    def calc_generative_metrics(self, preds, target) -> dict:
        return calculate_bleu(preds, target)
def main(args, model=None) -> SummarizationModule:
    Path(args.output_dir).mkdir(exist_ok=True)
    check_output_dir(args, expected_items=3)

    if model is None:
        if "summarization" in args.task:
            model = SummarizationModule(args)
        else:
            model = TranslationModule(args)
    dataset = Path(args.data_dir).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir).startswith("/tmp")
        or str(args.output_dir).startswith("/var")
    ):
        logger = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger

        project = os.environ.get("WANDB_PROJECT", dataset)
        logger = WandbLogger(name=model.output_dir.name, project=project)
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger

        logger = WandbLogger(name=model.output_dir.name, project=f"hf_{dataset}")

    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric, args.early_stopping_patience)
    else:
        es_callback = False

    lower_is_better = args.val_metric == "loss"
    trainer = generic_train(
        model,
        args,
        logging_callback=Seq2SeqLoggingCallback(),
        checkpoint_callback=get_checkpoint_callback(
            args.output_dir, model.val_metric, args.save_top_k, lower_is_better
        ),
        early_stopping_callback=es_callback,
        logger=logger,
    )
    pickle_save(model.hparams, model.output_dir / "hparams.pkl")
    if not args.do_predict:
        return model

    model.hparams.test_checkpoint = ""
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "*.ckpt"), recursive=True))
    if checkpoints:
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]
    trainer.logger.log_hyperparams(model.hparams)

    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
_UpperCamelCase = pl.Trainer.add_argparse_args(parser)
_UpperCamelCase = SummarizationModule.add_model_specific_args(parser, os.getcwd())
_UpperCamelCase = parser.parse_args()
main(args)
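# Example invocation (illustrative; the script name and data paths are
# assumptions, while the flags match those defined in add_model_specific_args
# above):
#
#   python finetune.py \
#       --data_dir ./cnn_dm \
#       --output_dir ./summarization_ckpts \
#       --task summarization \
#       --max_source_length 1024 --max_target_length 56 \
#       --n_val 500 --val_metric rouge2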
class Node:
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val


def inorder(root, res):
    # Recursive traversal
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    # Build BST
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
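# Quick sanity checks for tree_sort (arbitrary example inputs; note that this
# BST silently drops duplicate values, so the checks use distinct elements).
assert tree_sort([10, 1, 3, 2, 9, 14, 13]) == sorted([10, 1, 3, 2, 9, 14, 13])
assert tree_sort([5]) == [5]
assert tree_sort([]) == []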
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class TFDistilBertModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = False
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_distilbert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDistilBertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFDistilBertForMultipleChoice(config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDistilBertForTokenClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class TFDistilBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDistilBertModel,
            TFDistilBertForMaskedLM,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertForMultipleChoice,
        )
        if is_tf_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDistilBertModel,
            "fill-mask": TFDistilBertForMaskedLM,
            "question-answering": TFDistilBertForQuestionAnswering,
            "text-classification": TFDistilBertForSequenceClassification,
            "token-classification": TFDistilBertForTokenClassification,
            "zero-shot": TFDistilBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]):
            model = TFDistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_tf
class TFDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [0.19261885, -0.13732955, 0.4119799],
                    [0.22150156, -0.07422661, 0.39037204],
                    [0.22756018, -0.0896414, 0.3701467],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
"""simple docstring"""
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory():
    raise RuntimeError("CUDA out of memory.")


class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class MemoryTest(unittest.TestCase):
    def test_memory_implicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])

    def test_memory_explicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1

        bs, arg1 = mock_training_loop_function("hello")
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1], [8, "hello"])

    def test_start_zero(self):
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_approach_zero(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_verbose_guard(self):
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise_fake_out_of_memory()

        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, "hello", "world")
        self.assertIn("Batch size was passed into `f`", cm.exception.args[0])
        self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])

    def test_any_other_error(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError("Oops, we had an error!")

        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn("Oops, we had an error!", cm.exception.args[0])

    @require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
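# How the decorator under test is typically used outside a test suite: it
# retries the wrapped function, halving batch_size after each CUDA OOM, as the
# tests above exercise. The loop body below is a placeholder sketch only.
@find_executable_batch_size(starting_batch_size=64)
def train(batch_size):
    # (re)build dataloaders at `batch_size`, run the training epoch, etc.
    print(f"attempting batch_size={batch_size}")


# train()  # would halve batch_size on every OOM until a run succeeds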
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            decoder_hidden_size=self.hidden_size,
            decoder_num_hidden_layers=self.num_hidden_layers,
            decoder_num_attention_heads=self.num_attention_heads,
            decoder_intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTMAEModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = TFViTMAEForPreTraining(config)
        result = model(pixel_values, training=False)
        # expected sequence length = num_patches
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = TFViTMAEForPreTraining(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, training=False)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values, labels) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_tf
class TFViTMAEModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
    pipeline_model_mapping = {"feature-extraction": TFViTMAEModel} if is_tf_available() else {}

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = TFViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_keyword_and_dict_args(self):
        # make the random mask reproducible
        np.random.seed(2)
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class)
            outputs_dict = model(inputs, noise=noise)

            inputs_keywords = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
            outputs_keywords = model(**inputs_keywords, noise=noise)
            output_dict = outputs_dict[0].numpy()
            output_keywords = outputs_keywords[0].numpy()

            self.assertLess(np.sum(np.abs(output_dict - output_keywords)), 1e-6)

    def test_numpy_arrays(self):
        # make the random mask reproducible
        np.random.seed(2)
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        def prepare_numpy_arrays(inputs_dict):
            inputs_np_dict = {}
            for k, v in inputs_dict.items():
                if tf.is_tensor(v):
                    inputs_np_dict[k] = v.numpy()
                else:
                    inputs_np_dict[k] = np.array(v)
            return inputs_np_dict

        for model_class in self.all_model_classes:
            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class)
            inputs_np = prepare_numpy_arrays(inputs)

            output_for_dict_input = model(inputs_np, noise=noise)
            output_for_kw_input = model(**inputs_np, noise=noise)
            self.assert_outputs_same(output_for_dict_input, output_for_kw_input)

    def check_pt_tf_models(self, tf_model, pt_model, tf_inputs_dict):
        # make masks reproducible
        np.random.seed(2)

        num_patches = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        tf_noise = tf.constant(noise)

        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        tf_inputs_dict["noise"] = tf_noise

        super().check_pt_tf_models(tf_model, pt_model, tf_inputs_dict)

    def test_keras_save_load(self):
        # make the random mask reproducible
        np.random.seed(2)
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        tf_main_layer_classes = {
            module_member
            for model_class in self.all_model_classes
            for module in (import_module(model_class.__module__),)
            for module_member_name in dir(module)
            if module_member_name.endswith("MainLayer")
            # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
            and module_member_name[: -len("MainLayer")] == model_class.__name__[: -len("Model")]
            for module_member in (getattr(module, module_member_name),)
            if isinstance(module_member, type)
            and tf.keras.layers.Layer in module_member.__bases__
            and getattr(module_member, "_keras_serializable", False)
        }
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        noise = tf.convert_to_tensor(noise)
        inputs_dict.update({"noise": noise})

        for main_layer_class in tf_main_layer_classes:
            main_layer = main_layer_class(config)

            symbolic_inputs = {
                name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype) for name, tensor in inputs_dict.items()
            }

            model = tf.keras.Model(symbolic_inputs, outputs=main_layer(symbolic_inputs))
            outputs = model(inputs_dict)

            with tempfile.TemporaryDirectory() as tmpdirname:
                filepath = os.path.join(tmpdirname, "keras_model.h5")
                model.save(filepath)
                model = tf.keras.models.load_model(
                    filepath, custom_objects={main_layer_class.__name__: main_layer_class}
                )
                assert isinstance(model, tf.keras.Model)
                after_outputs = model(inputs_dict)
                self.assert_outputs_same(after_outputs, outputs)

    @slow
    def test_save_load(self):
        # make the random mask reproducible
        np.random.seed(2)
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            model_input = self._prepare_for_class(inputs_dict, model_class)
            outputs = model(model_input, noise=noise)

            if model_class.__name__ == "TFViTMAEModel":
                out_2 = outputs.last_hidden_state.numpy()
                out_2[np.isnan(out_2)] = 0
            else:
                out_2 = outputs.logits.numpy()
                out_2[np.isnan(out_2)] = 0

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=False)
                model = model_class.from_pretrained(tmpdirname)
                after_outputs = model(model_input, noise=noise)

                if model_class.__name__ == "TFViTMAEModel":
                    out_1 = after_outputs["last_hidden_state"].numpy()
                    out_1[np.isnan(out_1)] = 0
                else:
                    out_1 = after_outputs["logits"].numpy()
                    out_1[np.isnan(out_1)] = 0

                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)

    def test_save_load_config(self):
        # make the random mask reproducible
        np.random.seed(2)
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            model_input = self._prepare_for_class(inputs_dict, model_class)
            outputs = model(model_input, noise=noise)
            model_config = model.get_config()
            # make sure that returned config is jsonifiable, which is required by keras
            json.dumps(model_config)
            new_model = model_class.from_config(model.get_config())
            # make sure it also accepts a normal config
            _ = model_class.from_config(model.config)
            _ = new_model(model_input)  # Build model
            new_model.set_weights(model.get_weights())
            after_outputs = new_model(model_input, noise=noise)
            self.assert_outputs_same(after_outputs, outputs)

    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results."
    )
    def test_determinism(self):
        pass

    @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load")
    def test_model_outputs_equivalence(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        model = TFViTMAEModel.from_pretrained("google/vit-base-patch16-224")
        self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)

        model = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))

        # forward pass
        outputs = model(**inputs, noise=noise)

        # verify the logits
        expected_shape = tf.convert_to_tensor([1, 196, 768])
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.convert_to_tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
        )
        tf.debugging.assert_near(outputs.logits[0, :3, :3], expected_slice, atol=1e-4)
from __future__ import annotations

import csv

import requests
from bs4 import BeautifulSoup


def get_imdb_top_250_movies(url: str = "") -> dict[str, float]:
    url = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    titles = soup.find_all("td", attrs="titleColumn")
    ratings = soup.find_all("td", class_="ratingColumn imdbRating")
    return {
        title.a.text: float(rating.strong.text)
        for title, rating in zip(titles, ratings)
    }


def write_movies(filename: str = "IMDb_Top_250_Movies.csv") -> None:
    movies = get_imdb_top_250_movies()
    with open(filename, "w", newline="") as out_file:
        writer = csv.writer(out_file)
        writer.writerow(["Movie title", "IMDb rating"])
        for title, rating in movies.items():
            writer.writerow([title, rating])


if __name__ == "__main__":
    write_movies()
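# Illustrative smoke test for the scraper above (requires network access, and
# IMDb has changed its markup over time, so the selectors may need updating):
#
#   movies = get_imdb_top_250_movies()
#   for title, rating in list(movies.items())[:5]:
#       print(f"{rating:>4}  {title}")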
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=_lowercase )
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: str = field(default='''language-modeling''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
_lowerCamelCase: ClassVar[Features] = Features({'''text''': Value('''string''' )} )
_lowerCamelCase: ClassVar[Features] = Features({} )
_lowerCamelCase: str = "text"
@property
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict[str, str]:
return {self.text_column: "text"}
"""simple docstring"""
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def _snake_case ( snake_case__ : str = "isbn/0140328726" ):
A = olid.strip().strip('/' ) # Remove leading/trailing whitespace & slashes
if new_olid.count('/' ) != 1:
A = F'{olid} is not a valid Open Library olid'
raise ValueError(snake_case__ )
return requests.get(F'https://openlibrary.org/{new_olid}.json' ).json()
def _snake_case ( snake_case__ : dict ):
A = {
'title': 'Title',
'publish_date': 'Publish date',
'authors': 'Authors',
'number_of_pages': 'Number of pages:',
'first_sentence': 'First sentence',
'isbn_10': 'ISBN (10)',
'isbn_13': 'ISBN (13)',
}
A = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
A = [
get_openlibrary_data(author['key'] )['name'] for author in data['Authors']
]
A = data['First sentence']['value']
for key, value in data.items():
if isinstance(snake_case__ , snake_case__ ):
A = ', '.join(snake_case__ )
return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
_lowercase = input('''\nEnter the ISBN code to search (or \'quit\' to stop): ''').strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(F"""Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.""")
continue
print(F"""\nSearching Open Library for ISBN: {isbn}...\n""")
try:
_lowercase = summarize_book(get_openlibrary_data(F"""isbn/{isbn}"""))
print('''\n'''.join(F"""{key}: {value}""" for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(F"""Sorry, there are no results for ISBN: {isbn}.""")
"""simple docstring"""
from __future__ import annotations
def _A( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
A__ : str = list(range(len(lowerCAmelCase ) ) )
A__ : Any = [v / w for v, w in zip(lowerCAmelCase , lowerCAmelCase )]
index.sort(key=lambda lowerCAmelCase : ratio[i] , reverse=lowerCAmelCase )
A__ : float = 0
A__ : list[float] = [0] * len(lowerCAmelCase )
for i in index:
if weight[i] <= capacity:
A__ : List[Any] = 1
max_value += value[i]
capacity -= weight[i]
else:
A__ : str = capacity / weight[i]
max_value += value[i] * capacity / weight[i]
break
return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
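# Worked example for the function above (the classic textbook instance):
# ratios are 6, 5 and 4, so items 0 and 1 are taken whole and 20/30 of
# item 2 fills the remaining capacity, for a total value of 240.
max_value, fractions = fractional_knapsack([60, 100, 120], [10, 20, 30], 50)
assert max_value == 240.0
assert fractions == [1, 1, 20 / 30]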
"""simple docstring"""
import datasets
from .evaluate import evaluate
_UpperCamelCase = "\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n"
_UpperCamelCase = "\nThis metric wrap the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n"
_UpperCamelCase = "\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair as given in the references (see below)\n - 'prediction_text': list of possible texts for the answer, as a list of strings\n depending on a threshold on the confidence probability of each prediction.\n references: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair (see above),\n - 'answers': a Dict in the CUAD dataset format\n {\n 'text': list of possible texts for the answer, as a list of strings\n 'answer_start': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n 'exact_match': Exact match (the normalized answer exactly match the gold answer)\n 'f1': The F-score of predicted tokens versus the gold answer\n 'aupr': Area Under the Precision-Recall curve\n 'prec_at_80_recall': Precision at 80% recall\n 'prec_at_90_recall': Precision at 90% recall\nExamples:\n >>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]\n >>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]\n >>> cuad_metric = datasets.load_metric(\"cuad\")\n >>> results = cuad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __UpperCAmelCase (datasets.Metric ):
'''simple docstring'''
def lowerCamelCase ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": {
"""id""": datasets.Value("""string""" ),
"""prediction_text""": datasets.features.Sequence(datasets.Value("""string""" ) ),
},
"""references""": {
"""id""": datasets.Value("""string""" ),
"""answers""": datasets.features.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
},
} ) , codebase_urls=["""https://www.atticusprojectai.org/cuad"""] , reference_urls=["""https://www.atticusprojectai.org/cuad"""] , )
def lowerCamelCase ( self , snake_case_ , snake_case_ ):
'''simple docstring'''
A__ : str = {prediction["""id"""]: prediction["""prediction_text"""] for prediction in predictions}
A__ : Optional[Any] = [
{
"""paragraphs""": [
{
"""qas""": [
{
"""answers""": [{"""text""": answer_text} for answer_text in ref["""answers"""]["""text"""]],
"""id""": ref["""id"""],
}
for ref in references
]
}
]
}
]
A__ : Optional[Any] = evaluate(dataset=snake_case_ , predictions=snake_case_ )
return score
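# Minimal compute sketch mirroring the docstring example above; "example-qa-pair"
# is a placeholder id, and loading the metric needs the script available via
# `datasets`, so the call itself is left commented out.
predictions = [{"prediction_text": ["The seller:"], "id": "example-qa-pair"}]
references = [{"answers": {"answer_start": [143], "text": ["The seller:"]}, "id": "example-qa-pair"}]
# cuad_metric = datasets.load_metric("cuad")
# print(cuad_metric.compute(predictions=predictions, references=references))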
from ...configuration_utils import PretrainedConfig


class BertGenerationConfig(PretrainedConfig):
    model_type = "bert-generation"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=1,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
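# Minimal usage sketch for the config above: defaults come from __init__,
# and any keyword argument overrides them.
config = BertGenerationConfig(num_hidden_layers=12)
print(config.vocab_size, config.hidden_size, config.num_hidden_layers)
# -> 50358 1024 12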
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=24,
        num_hidden_layers=2,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels

    def get_config(self):
        return LiltConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict


@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)

        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)

        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]],
            device=torch_device,
        )

        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/s2t-wav2vec2-large-en-de": (
        "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}


class Speech2Text2Config(PretrainedConfig):
    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10000,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        decoder_layerdrop=0.0,
        use_cache=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_target_positions=1024,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
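# Sketch of the attribute_map above in action: PretrainedConfig resolves the
# generic names to the decoder-specific fields (values are __init__ defaults).
config = Speech2Text2Config()
assert config.hidden_size == config.d_model == 256
assert config.num_attention_heads == config.decoder_attention_heads == 4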
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : str , _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[int]=13 , _UpperCAmelCase : Optional[Any]=7 , _UpperCAmelCase : int=True , _UpperCAmelCase : List[str]=True , _UpperCAmelCase : Optional[int]=True , _UpperCAmelCase : Any=True , _UpperCAmelCase : Optional[Any]=99 , _UpperCAmelCase : Tuple=32 , _UpperCAmelCase : Optional[int]=5 , _UpperCAmelCase : Optional[int]=4 , _UpperCAmelCase : Dict=37 , _UpperCAmelCase : str="gelu" , _UpperCAmelCase : List[Any]=0.1 , _UpperCAmelCase : Tuple=0.1 , _UpperCAmelCase : Union[str, Any]=1_28 , _UpperCAmelCase : List[str]=32 , _UpperCAmelCase : List[Any]=16 , _UpperCAmelCase : Dict=2 , _UpperCAmelCase : Union[str, Any]=0.02 , _UpperCAmelCase : Optional[Any]=3 , _UpperCAmelCase : int=4 , _UpperCAmelCase : List[Any]=None , ):
"""simple docstring"""
UpperCAmelCase__ = parent
UpperCAmelCase__ = batch_size
UpperCAmelCase__ = seq_length
UpperCAmelCase__ = is_training
UpperCAmelCase__ = use_input_mask
UpperCAmelCase__ = use_token_type_ids
UpperCAmelCase__ = use_labels
UpperCAmelCase__ = vocab_size
UpperCAmelCase__ = hidden_size
UpperCAmelCase__ = num_hidden_layers
UpperCAmelCase__ = num_attention_heads
UpperCAmelCase__ = intermediate_size
UpperCAmelCase__ = hidden_act
UpperCAmelCase__ = hidden_dropout_prob
UpperCAmelCase__ = attention_probs_dropout_prob
UpperCAmelCase__ = max_position_embeddings
UpperCAmelCase__ = type_vocab_size
UpperCAmelCase__ = type_sequence_label_size
UpperCAmelCase__ = initializer_range
UpperCAmelCase__ = num_labels
UpperCAmelCase__ = num_choices
UpperCAmelCase__ = scope
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
"""simple docstring"""
UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase__ = None
if self.use_input_mask:
UpperCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase__ = None
if self.use_token_type_ids:
UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase__ = None
UpperCAmelCase__ = None
UpperCAmelCase__ = None
if self.use_labels:
UpperCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase__ = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase__ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
"""simple docstring"""
return NezhaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
(
(
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) ,
) = self.prepare_config_and_inputs()
UpperCAmelCase__ = True
UpperCAmelCase__ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def SCREAMING_SNAKE_CASE__ ( self : str , _UpperCAmelCase : Any , _UpperCAmelCase : int , _UpperCAmelCase : List[str] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[int] ):
"""simple docstring"""
UpperCAmelCase__ = NezhaModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase__ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
UpperCAmelCase__ = model(_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
UpperCAmelCase__ = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Tuple , _UpperCAmelCase : str , _UpperCAmelCase : int , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[str] , _UpperCAmelCase : str , ):
"""simple docstring"""
UpperCAmelCase__ = True
UpperCAmelCase__ = NezhaModel(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase__ = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase , encoder_attention_mask=_UpperCAmelCase , )
UpperCAmelCase__ = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase , )
UpperCAmelCase__ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self : Any , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : Any , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[int] ):
"""simple docstring"""
UpperCAmelCase__ = NezhaForMaskedLM(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase__ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any] ):
"""simple docstring"""
UpperCAmelCase__ = NezhaForNextSentencePrediction(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase__ = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , _UpperCAmelCase : str , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] ):
"""simple docstring"""
UpperCAmelCase__ = NezhaForPreTraining(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase__ = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , next_sentence_label=_UpperCAmelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int] ):
"""simple docstring"""
UpperCAmelCase__ = NezhaForQuestionAnswering(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase__ = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def create_and_check_for_sequence_classification( self : Dict , config : int , input_ids : List[Any] , token_type_ids : str , input_mask : Tuple , sequence_labels : List[str] , token_labels : Optional[int] , choice_labels : Any ):
"""simple docstring"""
config.num_labels = self.num_labels
model = NezhaForSequenceClassification(config )
model.to(torch_device )
model.eval()
result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def create_and_check_for_token_classification( self : str , config : Optional[int] , input_ids : int , token_type_ids : Optional[Any] , input_mask : Optional[Any] , sequence_labels : Dict , token_labels : str , choice_labels : List[Any] ):
"""simple docstring"""
config.num_labels = self.num_labels
model = NezhaForTokenClassification(config=config )
model.to(torch_device )
model.eval()
result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def create_and_check_for_multiple_choice( self : int , config : List[str] , input_ids : int , token_type_ids : Optional[Any] , input_mask : List[Any] , sequence_labels : Optional[Any] , token_labels : int , choice_labels : int ):
"""simple docstring"""
config.num_choices = self.num_choices
model = NezhaForMultipleChoice(config=config )
model.to(torch_device )
model.eval()
multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
result = model(
multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def prepare_config_and_inputs_for_common( self : str ):
"""simple docstring"""
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
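# Note: the common inputs deliberately omit the label tensors; the shared ModelTesterMixin
# machinery re-adds per-head labels through `_prepare_for_class` when a test asks for them.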
@require_torch
class NezhaModelTest(ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
all_model_classes = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
"""feature-extraction""": NezhaModel,
"""fill-mask""": NezhaForMaskedLM,
"""question-answering""": NezhaForQuestionAnswering,
"""text-classification""": NezhaForSequenceClassification,
"""token-classification""": NezhaForTokenClassification,
"""zero-shot""": NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
fx_compatible = True
def _prepare_for_class( self : Optional[Any] , inputs_dict : List[Any] , model_class : List[Any] , return_labels : Optional[int]=False ):
"""simple docstring"""
inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
if return_labels:
if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING ):
inputs_dict["""labels"""] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=torch_device )
inputs_dict["""next_sentence_label"""] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=torch_device )
return inputs_dict
def setUp( self : int ):
"""simple docstring"""
self.model_tester = NezhaModelTester(self )
self.config_tester = ConfigTester(self , config_class=NezhaConfig , hidden_size=37 )
def test_config( self : Tuple ):
"""simple docstring"""
self.config_tester.run_common_tests()
def test_model( self : List[str] ):
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs )
def test_model_as_decoder( self : Optional[Any] ):
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*config_and_inputs )
def test_model_as_decoder_with_default_input_mask( self : Optional[int] ):
"""simple docstring"""
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
) = self.model_tester.prepare_config_and_inputs_for_decoder()
input_mask = None
self.model_tester.create_and_check_model_as_decoder(
config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , )
def test_for_masked_lm( self : str ):
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
def test_for_multiple_choice( self : Optional[Any] ):
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )
def test_for_next_sequence_prediction( self : str ):
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs )
def test_for_pretraining( self : Dict ):
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*config_and_inputs )
def test_for_question_answering( self : Dict ):
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
def test_for_sequence_classification( self : Optional[Any] ):
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
def test_for_token_classification( self : int ):
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
@slow
def test_model_from_pretrained( self : int ):
"""simple docstring"""
for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = NezhaModel.from_pretrained(model_name )
self.assertIsNotNone(model )
@slow
@require_torch_gpu
def test_torchscript_device_change( self : str ):
"""simple docstring"""
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# NezhaForMultipleChoice behaves incorrectly in JIT environments.
if model_class == NezhaForMultipleChoice:
return
config.torchscript = True
model = model_class(config=config )
inputs_dict = self._prepare_for_class(inputs_dict , model_class )
traced_model = torch.jit.trace(
model , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(traced_model , os.path.join(tmp , """bert.pt""" ) )
loaded = torch.jit.load(os.path.join(tmp , """bert.pt""" ) , map_location=torch_device )
loaded(inputs_dict["""input_ids"""].to(torch_device ) , inputs_dict["""attention_mask"""].to(torch_device ) )
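# A minimal sketch of the trace/save/load round trip exercised above (assuming `model` and
# CPU `input_ids` / `attention_mask` tensors are already in scope):
#
#   traced = torch.jit.trace(model, (input_ids, attention_mask))
#   torch.jit.save(traced, "bert.pt")
#   loaded = torch.jit.load("bert.pt", map_location=torch_device)
#   loaded(input_ids.to(torch_device), attention_mask.to(torch_device))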
@require_torch
class NezhaModelIntegrationTest(unittest.TestCase ):
'''simple docstring'''
@slow
def test_inference_nezha_model( self : List[str] ):
"""simple docstring"""
model = NezhaModel.from_pretrained("""sijunhe/nezha-cn-base""" )
input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]] )
attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]] )
with torch.no_grad():
output = model(input_ids , attention_mask=attention_mask )[0]
expected_shape = torch.Size((1, 6, 7_68) )
self.assertEqual(output.shape , expected_shape )
expected_slice = torch.tensor([[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1E-4 ) )
@slow
def test_inference_nezha_masked_lm( self : List[str] ):
"""simple docstring"""
model = NezhaForMaskedLM.from_pretrained("""sijunhe/nezha-cn-base""" )
input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]] )
attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
output = model(input_ids , attention_mask=attention_mask )[0]
expected_shape = torch.Size((1, 6, 2_11_28) )
self.assertEqual(output.shape , expected_shape )
expected_slice = torch.tensor(
[[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1E-4 ) )
| 603
| 1
|
"""simple docstring"""
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class AbstractDatasetReader(ABC ):
def __init__( self , path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None , split: Optional[NamedSplit] = None , features: Optional[Features] = None , cache_dir: str = None , keep_in_memory: bool = False , streaming: bool = False , num_proc: Optional[int] = None , **kwargs , ):
self.path_or_paths = path_or_paths
self.split = split if split or isinstance(path_or_paths , dict ) else "train"
self.features = features
self.cache_dir = cache_dir
self.keep_in_memory = keep_in_memory
self.streaming = streaming
self.num_proc = num_proc
self.kwargs = kwargs
@abstractmethod
def read( self ) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
pass
class AbstractDatasetInputStream(ABC ):
def __init__( self , features: Optional[Features] = None , cache_dir: str = None , keep_in_memory: bool = False , streaming: bool = False , num_proc: Optional[int] = None , **kwargs , ):
self.features = features
self.cache_dir = cache_dir
self.keep_in_memory = keep_in_memory
self.streaming = streaming
self.num_proc = num_proc
self.kwargs = kwargs
@abstractmethod
def read( self ) -> Union[Dataset, IterableDataset]:
pass
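# A minimal sketch of a concrete subclass; `load_my_format` is a hypothetical helper, not a
# real datasets API:
#
#   class MyFormatReader(AbstractDatasetReader):
#       def read(self):
#           return load_my_format(self.path_or_paths, features=self.features, split=self.split)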
| 370
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
"""config""": [
"""EXTERNAL_DATA_FORMAT_SIZE_LIMIT""",
"""OnnxConfig""",
"""OnnxConfigWithPast""",
"""OnnxSeq2SeqConfigWithPast""",
"""PatchingSpec""",
],
"""convert""": ["""export""", """validate_model_outputs"""],
"""features""": ["""FeaturesManager"""],
"""utils""": ["""ParameterFormat""", """compute_serialized_parameters_size"""],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
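# With the lazy structure above, e.g. `from transformers.onnx import OnnxConfig` only resolves
# the submodule on first attribute access, keeping the top-level package import cheap.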
| 370
| 1
|
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def convert_ldm_original( checkpoint_path , config_path , output_path ):
config = OmegaConf.load(config_path )
state_dict = torch.load(checkpoint_path , map_location='''cpu''' )['''model''']
keys = list(state_dict.keys() )
# extract state_dict for VQVAE
first_stage_dict = {}
first_stage_key = '''first_stage_model.'''
for key in keys:
if key.startswith(first_stage_key ):
first_stage_dict[key.replace(first_stage_key , '''''' )] = state_dict[key]
# extract state_dict for UNetLDM
unet_state_dict = {}
unet_key = '''model.diffusion_model.'''
for key in keys:
if key.startswith(unet_key ):
unet_state_dict[key.replace(unet_key , '''''' )] = state_dict[key]
vqvae_init_args = config.model.params.first_stage_config.params
unet_init_args = config.model.params.unet_config.params
vqvae = VQModel(**vqvae_init_args ).eval()
vqvae.load_state_dict(first_stage_dict )
unet = UNetLDMModel(**unet_init_args ).eval()
unet.load_state_dict(unet_state_dict )
scheduler = DDIMScheduler(
timesteps=config.model.params.timesteps , beta_schedule='''scaled_linear''' , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=False , )
pipeline = LDMPipeline(vqvae , unet , scheduler )
pipeline.save_pretrained(output_path )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", type=str, required=True)
parser.add_argument("--config_path", type=str, required=True)
parser.add_argument("--output_path", type=str, required=True)
_lowerCAmelCase = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
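# Example invocation (script name and all paths are placeholders):
#   python conversion_ldm_uncond.py --checkpoint_path model.ckpt --config_path config.yaml --output_path ./ldm_pipeline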
| 10
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
lowerCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
def __init__( self : Optional[Any] , *args : Dict , **kwargs : List[Any] ):
warnings.warn(
"The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use ChineseCLIPImageProcessor instead." , FutureWarning , )
super().__init__(*args , **kwargs )
| 692
| 0
|
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
__UpperCAmelCase = logging.get_logger(__name__)
@dataclass
class GlueDataTrainingArguments:
"""simple docstring"""
task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys() )} )
data_dir: str = field(
metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} )
max_seq_length: int = field(
default=128 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
overwrite_cache: bool = field(
default=False , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def __post_init__( self : List[str] ) -> Tuple:
"""simple docstring"""
self.task_name = self.task_name.lower()
class Split(Enum ):
"""simple docstring"""
train = "train"
dev = "dev"
test = "test"
class GlueDataset(Dataset ):
"""simple docstring"""
args: GlueDataTrainingArguments
output_mode: str
features: List[InputFeatures]
def __init__( self : Union[str, Any] , args : GlueDataTrainingArguments , tokenizer : PreTrainedTokenizerBase , limit_length : Optional[int] = None , mode : Union[str, Split] = Split.train , cache_dir : Optional[str] = None , ) -> Optional[int]:
"""simple docstring"""
warnings.warn(
"""This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets """
"""library. You can have a look at this example script for pointers: """
"""https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py""" , FutureWarning , )
self.args = args
self.processor = glue_processors[args.task_name]()
self.output_mode = glue_output_modes[args.task_name]
if isinstance(mode , str ):
try:
mode = Split[mode]
except KeyError:
raise KeyError("""mode is not a valid split name""" )
# Load data features from cache or dataset file
cached_features_file = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , f'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}''' , )
label_list = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
label_list[1], label_list[2] = label_list[2], label_list[1]
self.label_list = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lock_path = cached_features_file + """.lock"""
with FileLock(lock_path ):
if os.path.exists(cached_features_file ) and not args.overwrite_cache:
start = time.time()
self.features = torch.load(cached_features_file )
logger.info(
f'''Loading features from cached file {cached_features_file} [took %.3f s]''' , time.time() - start )
else:
logger.info(f'''Creating features from dataset file at {args.data_dir}''' )
if mode == Split.dev:
examples = self.processor.get_dev_examples(args.data_dir )
elif mode == Split.test:
examples = self.processor.get_test_examples(args.data_dir )
else:
examples = self.processor.get_train_examples(args.data_dir )
if limit_length is not None:
examples = examples[:limit_length]
self.features = glue_convert_examples_to_features(
examples , tokenizer , max_length=args.max_seq_length , label_list=label_list , output_mode=self.output_mode , )
start = time.time()
torch.save(self.features , cached_features_file )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' )
def __len__( self : List[str] ) -> str:
"""simple docstring"""
return len(self.features )
def __getitem__( self : int , i : Dict ) -> InputFeatures:
"""simple docstring"""
return self.features[i]
def get_labels( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
return self.label_list
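# A minimal usage sketch (task name, paths and `tokenizer` are placeholders):
#
#   args = GlueDataTrainingArguments(task_name="mrpc", data_dir="./glue_data/MRPC")
#   train_dataset = GlueDataset(args, tokenizer=tokenizer, mode="train")
#   print(len(train_dataset), train_dataset.get_labels())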
| 719
|
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
"""vocab_file""": """vocab.txt""",
"""merges_file""": """bpe.codes""",
}
__UpperCAmelCase = {
"""vocab_file""": {
"""vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt""",
"""vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt""",
},
"""merges_file""": {
"""vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes""",
"""vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes""",
},
}
__UpperCAmelCase = {
"""vinai/phobert-base""": 256,
"""vinai/phobert-large""": 256,
}
def get_pairs(word : List[str] ) -> Optional[int]:
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
prev_char = char
pairs = set(pairs )
return pairs
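# e.g. get_pairs(("h", "e", "l", "l", "o</w>")) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o</w>")}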
class PhobertTokenizer(PreTrainedTokenizer ):
"""simple docstring"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Dict , vocab_file : List[Any] , merges_file : Union[str, Any] , bos_token : Optional[int]="<s>" , eos_token : List[str]="</s>" , sep_token : Dict="</s>" , cls_token : Dict="<s>" , unk_token : Optional[Any]="<unk>" , pad_token : int="<pad>" , mask_token : Dict="<mask>" , **kwargs : int , ) -> Dict:
"""simple docstring"""
super().__init__(
bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , **kwargs , )
self.vocab_file = vocab_file
self.merges_file = merges_file
self.encoder = {}
self.encoder[bos_token] = 0
self.encoder[pad_token] = 1
self.encoder[eos_token] = 2
self.encoder[unk_token] = 3
self.add_from_file(vocab_file )
self.decoder = {v: k for k, v in self.encoder.items()}
with open(merges_file , encoding="""utf-8""" ) as merges_handle:
merges = merges_handle.read().split("""\n""" )[:-1]
merges = [tuple(merge.split()[:-1] ) for merge in merges]
self.bpe_ranks = dict(zip(merges , range(len(merges ) ) ) )
self.cache = {}
def build_inputs_with_special_tokens( self : str , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
if token_ids_1 is None:
return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
cls = [self.cls_token_id]
sep = [self.sep_token_id]
return cls + token_ids_0 + sep + sep + token_ids_1 + sep
def get_special_tokens_mask( self : Tuple , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None , already_has_special_tokens : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=already_has_special_tokens )
if token_ids_1 is None:
return [1] + ([0] * len(token_ids_0 )) + [1]
return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
def create_token_type_ids_from_sequences( self : Any , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep ) * [0]
return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
@property
def vocab_size( self : int ) -> Optional[int]:
"""simple docstring"""
return len(self.encoder )
def get_vocab( self : List[str] ) -> Optional[int]:
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def bpe( self : Dict , token : Optional[Any] ) -> int:
"""simple docstring"""
if token in self.cache:
return self.cache[token]
word = tuple(token )
word = tuple(list(word[:-1] ) + [word[-1] + """</w>"""] )
pairs = get_pairs(word )
if not pairs:
return token
while True:
bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
first , second = bigram
new_word = []
i = 0
while i < len(word ):
try:
j = word.index(first , i )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
i = j
if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
new_word = tuple(new_word )
word = new_word
if len(word ) == 1:
break
else:
pairs = get_pairs(word )
word = """@@ """.join(word )
word = word[:-4]
self.cache[token] = word
return word
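# The loop above repeatedly fuses the lowest-ranked (earliest-learned) adjacent pair until no
# learned merge applies; "@@ " marks non-final subword pieces and the trailing "</w>" word
# marker (4 characters) is stripped before caching, mirroring fastBPE's output format.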
def _tokenize( self : Union[str, Any] , text : Tuple ) -> str:
"""simple docstring"""
split_tokens = []
words = re.findall(r"""\S+\n?""" , text )
for token in words:
split_tokens.extend(list(self.bpe(token ).split(""" """ ) ) )
return split_tokens
def _convert_token_to_id( self : List[Any] , token : int ) -> Any:
"""simple docstring"""
return self.encoder.get(token , self.encoder.get(self.unk_token ) )
def _convert_id_to_token( self : Optional[int] , index : str ) -> Optional[int]:
"""simple docstring"""
return self.decoder.get(index , self.unk_token )
def convert_tokens_to_string( self : Optional[Any] , tokens : Any ) -> Any:
"""simple docstring"""
out_string = """ """.join(tokens ).replace("""@@ """ , """""" ).strip()
return out_string
def save_vocabulary( self : Tuple , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(save_directory ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
out_vocab_file = os.path.join(
save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
out_merge_file = os.path.join(
save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
copyfile(self.vocab_file , out_vocab_file )
if os.path.abspath(self.merges_file ) != os.path.abspath(out_merge_file ):
copyfile(self.merges_file , out_merge_file )
return out_vocab_file, out_merge_file
def add_from_file( self : Optional[Any] , f : Optional[int] ) -> List[str]:
"""simple docstring"""
if isinstance(f , str ):
try:
with open(f , """r""" , encoding="""utf-8""" ) as fd:
self.add_from_file(fd )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception(f'''Incorrect encoding detected in {f}, please rebuild the dataset''' )
return
lines = f.readlines()
for lineTmp in lines:
line = lineTmp.strip()
idx = line.rfind(""" """ )
if idx == -1:
raise ValueError("""Incorrect dictionary format, expected '<token> <cnt>'""" )
word = line[:idx]
self.encoder[word] = len(self.encoder )
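# Each dictionary line is expected to look like "<token> <count>"; only the token part is
# kept, and ids continue from the four reserved specials (0-3) registered in __init__.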
| 218
| 0
|
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class TimesformerModelTester:
'''simple docstring'''
def __init__( self : Union[str, Any] , parent : Optional[int] , batch_size : List[str]=13 , image_size : Optional[int]=10 , num_channels : str=3 , patch_size : int=2 , num_frames : Optional[Any]=2 , is_training : Any=True , use_labels : Optional[int]=True , hidden_size : List[Any]=32 , num_hidden_layers : Optional[Any]=5 , num_attention_heads : str=4 , intermediate_size : Optional[int]=37 , hidden_act : int="gelu" , hidden_dropout_prob : Dict=0.1 , attention_probs_dropout_prob : Optional[Any]=0.1 , num_labels : Dict=10 , initializer_range : List[Any]=0.02 , attention_type : Dict="divided_space_time" , scope : Any=None , ) -> str:
"""simple docstring"""
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.num_channels = num_channels
self.patch_size = patch_size
self.num_frames = num_frames
self.is_training = is_training
self.use_labels = use_labels
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.attention_type = attention_type
self.initializer_range = initializer_range
self.scope = scope
self.num_labels = num_labels
# in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
self.num_patches_per_frame = (image_size // patch_size) ** 2
self.seq_length = (num_frames) * self.num_patches_per_frame + 1
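# e.g. with the defaults image_size=10, patch_size=2, num_frames=2:
# (10 // 2) ** 2 = 25 patches per frame -> 2 * 25 + 1 = 51 tokens per clip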
def prepare_config_and_inputs( self : Tuple ) -> int:
"""simple docstring"""
pixel_values = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
labels = None
if self.use_labels:
labels = ids_tensor([self.batch_size] , self.num_labels )
config = self.get_config()
return config, pixel_values, labels
def get_config( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
config = TimesformerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
config.num_labels = self.num_labels
return config
def create_and_check_model( self : int , config : Tuple , pixel_values : List[Any] , labels : int ) -> Optional[Any]:
"""simple docstring"""
model = TimesformerModel(config=config )
model.to(torch_device )
model.eval()
result = model(pixel_values )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def create_and_check_for_video_classification( self : Optional[Any] , config : Tuple , pixel_values : Dict , labels : Any ) -> List[str]:
"""simple docstring"""
model = TimesformerForVideoClassification(config )
model.to(torch_device )
model.eval()
result = model(pixel_values )
# verify the logits shape
expected_shape = torch.Size((self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape , expected_shape )
def prepare_config_and_inputs_for_common( self : Optional[int] ) -> int:
"""simple docstring"""
config_and_inputs = self.prepare_config_and_inputs()
config , pixel_values , labels = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class TimesformerModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
all_model_classes = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
pipeline_model_mapping = (
{'feature-extraction': TimesformerModel, 'video-classification': TimesformerForVideoClassification}
if is_torch_available()
else {}
)
test_pruning = False
test_torchscript = False
test_resize_embeddings = False
test_head_masking = False
def setUp( self : int ) -> str:
"""simple docstring"""
self.model_tester = TimesformerModelTester(self )
self.config_tester = ConfigTester(
self , config_class=TimesformerConfig , has_text_modality=False , hidden_size=37 )
def _prepare_for_class( self : Union[str, Any] , inputs_dict : Union[str, Any] , model_class : Dict , return_labels : Optional[Any]=False ) -> int:
"""simple docstring"""
inputs_dict = copy.deepcopy(inputs_dict )
if return_labels:
if model_class in get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING ):
inputs_dict["""labels"""] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=torch_device )
return inputs_dict
def test_config( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="TimeSformer does not use inputs_embeds" )
def test_inputs_embeds( self : Dict ) -> Tuple:
"""simple docstring"""
pass
def test_model_common_attributes( self : Tuple ) -> str:
"""simple docstring"""
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
x = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(x , nn.Linear ) )
def test_forward_signature( self : List[str] ) -> Dict:
"""simple docstring"""
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config )
signature = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ["pixel_values"]
self.assertListEqual(arg_names[:1] , expected_arg_names )
def test_model( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs )
def test_for_video_classification( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*config_and_inputs )
@slow
def test_model_from_pretrained( self : Any ) -> List[Any]:
"""simple docstring"""
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = TimesformerModel.from_pretrained(model_name )
self.assertIsNotNone(model )
def test_attention_outputs( self : str ) -> List[str]:
"""simple docstring"""
if not self.has_attentions:
pass
else:
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
for model_class in self.all_model_classes:
seq_len = self.model_tester.seq_length
num_frames = self.model_tester.num_frames
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = False
config.return_dict = True
model = model_class(config )
model.to(torch_device )
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
attentions = outputs.attentions
self.assertEqual(len(attentions ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
config.output_attentions = True
model = model_class(config )
model.to(torch_device )
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
attentions = outputs.attentions
self.assertEqual(len(attentions ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
out_len = len(outputs )
# Check attention is always last and order is fine
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = True
model = model_class(config )
model.to(torch_device )
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
self.assertEqual(out_len + 1 , len(outputs ) )
self_attentions = outputs.attentions
self.assertEqual(len(self_attentions ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
def test_hidden_states_output( self : List[str] ) -> Dict:
"""simple docstring"""
def check_hidden_states_output(inputs_dict : Tuple , config : Dict , model_class : Tuple ):
model = model_class(config )
model.to(torch_device )
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
hidden_states = outputs.hidden_states
expected_num_layers = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(hidden_states ) , expected_num_layers )
seq_length = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(inputs_dict , config , model_class )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict , config , model_class )
def prepare_video():
video_file = hf_hub_download(
repo_id="hf-internal-testing/spaghetti-video" , filename="eating_spaghetti.npy" , repo_type="dataset")
video = np.load(video_file)
return list(video)
@require_torch
@require_vision
class TimesformerModelIntegrationTest(unittest.TestCase ):
'''simple docstring'''
@cached_property
def default_image_processor( self : int ) -> int:
"""simple docstring"""
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def test_inference_for_video_classification( self : Dict ) -> Optional[int]:
"""simple docstring"""
model = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400" ).to(
torch_device )
image_processor = self.default_image_processor
video = prepare_video()
inputs = image_processor(video[:8] , return_tensors="pt" ).to(torch_device )
# forward pass
with torch.no_grad():
outputs = model(**inputs )
# verify the logits
expected_shape = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape , expected_shape )
expected_slice = torch.tensor([-0.3016, -0.7713, -0.4205] ).to(torch_device )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
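# The (1, 400) logits correspond to the Kinetics-400 label set of the
# "facebook/timesformer-base-finetuned-k400" checkpoint used above.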
| 25
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_convnext': ['CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvNextConfig', 'ConvNextOnnxConfig']
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['feature_extraction_convnext'] = ['ConvNextFeatureExtractor']
_import_structure['image_processing_convnext'] = ['ConvNextImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_convnext'] = [
'CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvNextForImageClassification',
'ConvNextModel',
'ConvNextPreTrainedModel',
'ConvNextBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_tf_convnext'] = [
'TFConvNextForImageClassification',
'TFConvNextModel',
'TFConvNextPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 476
| 0
|
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class DownloadTests(unittest.TestCase ):
"""simple docstring"""
def test_download_only_pytorch( self : Dict ):
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
_ = FlaxDiffusionPipeline.from_pretrained(
"hf-internal-testing/tiny-stable-diffusion-pipe" , safety_checker=None , cache_dir=tmpdirname )
all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname , os.listdir(tmpdirname )[0] , "snapshots" ) )]
files = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith(".bin" ) for f in files )
@slow
@require_flax
class FlaxStableDiffusionPipelineIntegrationTests(unittest.TestCase ):
"""simple docstring"""
def test_stable_diffusion_flax_tiny( self : Dict ):
pipeline , params = FlaxStableDiffusionPipeline.from_pretrained(
"hf-internal-testing/tiny-stable-diffusion-pipe" , safety_checker=None )
prompt = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
prng_seed = jax.random.PRNGKey(0 )
num_inference_steps = 4
num_samples = jax.device_count()
prompt = num_samples * [prompt]
prompt_ids = pipeline.prepare_inputs(prompt )
# shard inputs and rng
params = replicate(params )
prng_seed = jax.random.split(prng_seed , num_samples )
prompt_ids = shard(prompt_ids )
images = pipeline(prompt_ids , params , prng_seed , num_inference_steps , jit=True ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.float32 ).sum() - 4.1_514_745 ) < 1E-3
assert np.abs(np.abs(images , dtype=np.float32 ).sum() - 49_947.875 ) < 5E-1
images_pil = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(images_pil ) == num_samples
def test_stable_diffusion_v1_4( self : Dict ):
pipeline , params = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="flax" , safety_checker=None )
prompt = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
prng_seed = jax.random.PRNGKey(0 )
num_inference_steps = 50
num_samples = jax.device_count()
prompt = num_samples * [prompt]
prompt_ids = pipeline.prepare_inputs(prompt )
# shard inputs and rng
params = replicate(params )
prng_seed = jax.random.split(prng_seed , num_samples )
prompt_ids = shard(prompt_ids )
images = pipeline(prompt_ids , params , prng_seed , num_inference_steps , jit=True ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.float32 ).sum() - 0.05_652_401) ) < 1E-3
assert np.abs((np.abs(images , dtype=np.float32 ).sum() - 2_383_808.2) ) < 5E-1
def test_stable_diffusion_v1_4_bfloat_16( self : int ):
pipeline , params = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloat16 , safety_checker=None )
prompt = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
prng_seed = jax.random.PRNGKey(0 )
num_inference_steps = 50
num_samples = jax.device_count()
prompt = num_samples * [prompt]
prompt_ids = pipeline.prepare_inputs(prompt )
# shard inputs and rng
params = replicate(params )
prng_seed = jax.random.split(prng_seed , num_samples )
prompt_ids = shard(prompt_ids )
images = pipeline(prompt_ids , params , prng_seed , num_inference_steps , jit=True ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.float32 ).sum() - 0.04_003_906) ) < 1E-3
assert np.abs((np.abs(images , dtype=np.float32 ).sum() - 2_373_516.75) ) < 5E-1
def test_stable_diffusion_v1_4_bfloat_16_with_safety( self : Optional[int] ):
pipeline , params = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloat16 )
prompt = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
prng_seed = jax.random.PRNGKey(0 )
num_inference_steps = 50
num_samples = jax.device_count()
prompt = num_samples * [prompt]
prompt_ids = pipeline.prepare_inputs(prompt )
# shard inputs and rng
params = replicate(params )
prng_seed = jax.random.split(prng_seed , num_samples )
prompt_ids = shard(prompt_ids )
images = pipeline(prompt_ids , params , prng_seed , num_inference_steps , jit=True ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.float32 ).sum() - 0.04_003_906) ) < 1E-3
assert np.abs((np.abs(images , dtype=np.float32 ).sum() - 2_373_516.75) ) < 5E-1
def test_stable_diffusion_v1_4_bfloat_16_ddim( self : Optional[int] ):
scheduler = FlaxDDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" , set_alpha_to_one=False , steps_offset=1 , )
pipeline , params = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloat16 , scheduler=scheduler , safety_checker=None , )
scheduler_state = scheduler.create_state()
params["scheduler"] = scheduler_state
prompt = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
prng_seed = jax.random.PRNGKey(0 )
num_inference_steps = 50
num_samples = jax.device_count()
prompt = num_samples * [prompt]
prompt_ids = pipeline.prepare_inputs(prompt )
# shard inputs and rng
params = replicate(params )
prng_seed = jax.random.split(prng_seed , num_samples )
prompt_ids = shard(prompt_ids )
images = pipeline(prompt_ids , params , prng_seed , num_inference_steps , jit=True ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.float32 ).sum() - 0.045_043_945) ) < 1E-3
assert np.abs((np.abs(images , dtype=np.float32 ).sum() - 2_347_693.5) ) < 5E-1
def test_jax_memory_efficient_attention( self : List[Any] ):
prompt = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
num_samples = jax.device_count()
prompt = num_samples * [prompt]
prng_seed = jax.random.split(jax.random.PRNGKey(0 ) , num_samples )
pipeline , params = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloat16 , safety_checker=None , )
params = replicate(params )
prompt_ids = pipeline.prepare_inputs(prompt )
prompt_ids = shard(prompt_ids )
images = pipeline(prompt_ids , params , prng_seed , jit=True ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
slice = images[2, 0, 256, 10:17, 1]
# With memory efficient attention
pipeline , params = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloat16 , safety_checker=None , use_memory_efficient_attention=True , )
params = replicate(params )
prompt_ids = pipeline.prepare_inputs(prompt )
prompt_ids = shard(prompt_ids )
images_eff = pipeline(prompt_ids , params , prng_seed , jit=True ).images
assert images_eff.shape == (num_samples, 1, 512, 512, 3)
slice_eff = images_eff[2, 0, 256, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1E-2
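# All of the integration tests above follow the same data-parallel recipe: `replicate` the
# pipeline params across devices, `jax.random.split` the RNG per device, `shard` the
# tokenized prompts, then call the pipeline with jit=True so each device renders one sample.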
| 706
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_mgp_str''': ['''MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MgpstrConfig'''],
'''processing_mgp_str''': ['''MgpstrProcessor'''],
'''tokenization_mgp_str''': ['''MgpstrTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_mgp_str'''] = [
'''MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MgpstrModel''',
'''MgpstrPreTrainedModel''',
'''MgpstrForSceneTextRecognition''',
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 81
| 0
|
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read(bpayload: bytes , sampling_rate: int ):
ar = F"{sampling_rate}"
ac = "1"
format_for_conversion = "f32le"
ffmpeg_command = [
"ffmpeg",
"-i",
"pipe:0",
"-ac",
ac,
"-ar",
ar,
"-f",
format_for_conversion,
"-hide_banner",
"-loglevel",
"quiet",
"pipe:1",
]
try:
with subprocess.Popen(ffmpeg_command , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
output_stream = ffmpeg_process.communicate(bpayload )
except FileNotFoundError as error:
raise ValueError("ffmpeg was not found but is required to load audio files from filename" ) from error
out_bytes = output_stream[0]
audio = np.frombuffer(out_bytes , np.float32 )
if audio.shape[0] == 0:
raise ValueError("Malformed soundfile" )
return audio
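# A minimal usage sketch (the file path is a placeholder):
#
#   with open("sample.flac", "rb") as fh:
#       audio = ffmpeg_read(fh.read(), sampling_rate=16_000)  # 1-D float32 numpy array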
def ffmpeg_microphone(sampling_rate: int , chunk_length_s: float , format_for_conversion: str = "f32le" , ):
ar = F"{sampling_rate}"
ac = "1"
if format_for_conversion == "s16le":
size_of_sample = 2
elif format_for_conversion == "f32le":
size_of_sample = 4
else:
raise ValueError(F"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`" )
system = platform.system()
if system == "Linux":
format_ = "alsa"
input_ = "default"
elif system == "Darwin":
format_ = "avfoundation"
input_ = ":0"
elif system == "Windows":
format_ = "dshow"
input_ = "default"
ffmpeg_command = [
"ffmpeg",
"-f",
format_,
"-i",
input_,
"-ac",
ac,
"-ar",
ar,
"-f",
format_for_conversion,
"-fflags",
"nobuffer",
"-hide_banner",
"-loglevel",
"quiet",
"pipe:1",
]
chunk_len = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
iterator = _ffmpeg_stream(ffmpeg_command , chunk_len )
for item in iterator:
yield item
def ffmpeg_microphone_live(sampling_rate: int , chunk_length_s: float , stream_chunk_s: Optional[int] = None , stride_length_s: Optional[Union[Tuple[float, float], float]] = None , format_for_conversion: str = "f32le" , ):
if stream_chunk_s is not None:
chunk_s = stream_chunk_s
else:
chunk_s = chunk_length_s
microphone = ffmpeg_microphone(sampling_rate , chunk_s , format_for_conversion=format_for_conversion )
if format_for_conversion == "s16le":
dtype = np.int16
size_of_sample = 2
elif format_for_conversion == "f32le":
dtype = np.float32
size_of_sample = 4
else:
raise ValueError(F"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`" )
if stride_length_s is None:
stride_length_s = chunk_length_s / 6
chunk_len = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(stride_length_s , (int, float) ):
stride_length_s = [stride_length_s, stride_length_s]
stride_left = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
stride_right = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
audio_time = datetime.datetime.now()
delta = datetime.timedelta(seconds=chunk_s )
for item in chunk_bytes_iter(microphone , chunk_len , stride=(stride_left, stride_right) , stream=True ):
# Put everything back in numpy scale
item["raw"] = np.frombuffer(item["raw"] , dtype=dtype )
item["stride"] = (
item["stride"][0] // size_of_sample,
item["stride"][1] // size_of_sample,
)
item["sampling_rate"] = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 10 * delta:
# We're late !! SKIP
continue
yield item
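# A minimal consumer sketch (assuming a 16 kHz microphone; `process` is a placeholder):
#
#   for chunk in ffmpeg_microphone_live(16_000, chunk_length_s=5.0, stream_chunk_s=1.0):
#       process(chunk["raw"], chunk["stride"], chunk.get("partial", False))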
def chunk_bytes_iter(iterator , chunk_len: int , stride: Tuple[int, int] , stream: bool = False ):
acc = b""
stride_left , stride_right = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
F"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}" )
_stride_left = 0
for raw in iterator:
acc += raw
if stream and len(acc ) < chunk_len:
stride = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(acc ) >= chunk_len:
# We are flushing the accumulator
stride = (_stride_left, stride_right)
item = {"raw": acc[:chunk_len], "stride": stride}
if stream:
item["partial"] = False
yield item
_stride_left = stride_left
acc = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(acc ) > stride_left:
item = {"raw": acc, "stride": (_stride_left, 0)}
if stream:
item["partial"] = False
yield item
def _ffmpeg_stream(ffmpeg_command, buflen: int):
    """
    Internal function to create the generator of data through ffmpeg.
    """
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
| 122
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speech_encoder_decoder"] = ["SpeechEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_speech_encoder_decoder"] = ["FlaxSpeechEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 122
| 1
|
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse("""0.8.3"""):
raise Exception("""requires gluonnlp == 0.8.3""")
if version.parse(mx.__version__) != version.parse("""1.5.0"""):
raise Exception("""requires mxnet == 1.5.0""")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "The Nymphenburg Palace is a beautiful palace in Munich!"
def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path: str, pytorch_dump_folder_path: str):
    """
    Convert the original Bort checkpoint (based on MXNET and Gluonnlp) to our BERT structure.
    """
    # Original Bort configuration
    bort_4_8_768_1024_hparams = {
        "attention_cell": "multi_head",
        "num_layers": 4,
        "units": 1024,
        "hidden_size": 768,
        "max_length": 512,
        "num_heads": 8,
        "scaled": True,
        "dropout": 0.1,
        "use_residual": True,
        "embed_size": 1024,
        "embed_dropout": 0.1,
        "word_embed": None,
        "layer_norm_eps": 1e-5,
        "token_type_vocab_size": 2,
    }
    predefined_args = bort_4_8_768_1024_hparams
    # Let's construct the original Bort model here
    # Taken from official BERT implementation, see:
    # https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args["attention_cell"],
        num_layers=predefined_args["num_layers"],
        units=predefined_args["units"],
        hidden_size=predefined_args["hidden_size"],
        max_length=predefined_args["max_length"],
        num_heads=predefined_args["num_heads"],
        scaled=predefined_args["scaled"],
        dropout=predefined_args["dropout"],
        output_attention=False,
        output_all_encodings=False,
        use_residual=predefined_args["use_residual"],
        activation=predefined_args.get("activation", "gelu"),
        layer_norm_eps=predefined_args.get("layer_norm_eps", None),
    )
    # Vocab information needs to be fetched first
    # It's the same as RoBERTa, so RobertaTokenizer can be used later
    vocab_name = "openwebtext_ccnews_stories_books_cased"
    # Specify download folder to Gluonnlp's vocab
    gluon_cache_dir = os.path.join(get_home_dir(), "models")
    bort_vocab = _load_vocab(vocab_name, None, gluon_cache_dir, cls=Vocab)
    original_bort = nlp.model.BERTModel(
        encoder,
        len(bort_vocab),
        units=predefined_args["units"],
        embed_size=predefined_args["embed_size"],
        embed_dropout=predefined_args["embed_dropout"],
        word_embed=predefined_args["word_embed"],
        use_pooler=False,
        use_token_type_embed=False,
        token_type_vocab_size=predefined_args["token_type_vocab_size"],
        use_classifier=False,
        use_decoder=False,
    )
    original_bort.load_parameters(bort_checkpoint_path, cast_dtype=True, ignore_extra=True)
    params = original_bort._collect_params_with_prefix()
    # Build our config 🤗
    hf_bort_config_json = {
        "architectures": ["BertForMaskedLM"],
        "attention_probs_dropout_prob": predefined_args["dropout"],
        "hidden_act": "gelu",
        "hidden_dropout_prob": predefined_args["dropout"],
        "hidden_size": predefined_args["embed_size"],
        "initializer_range": 0.02,
        "intermediate_size": predefined_args["hidden_size"],
        "layer_norm_eps": predefined_args["layer_norm_eps"],
        "max_position_embeddings": predefined_args["max_length"],
        "model_type": "bort",
        "num_attention_heads": predefined_args["num_heads"],
        "num_hidden_layers": predefined_args["num_layers"],
        "pad_token_id": 1,  # 2 = BERT, 1 = RoBERTa
        "type_vocab_size": 1,  # 2 = BERT, 1 = RoBERTa
        "vocab_size": len(bort_vocab),
    }
    hf_bort_config = BertConfig.from_dict(hf_bort_config_json)
    hf_bort_model = BertForMaskedLM(hf_bort_config)
    hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
    # Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()))

    # Check param shapes and map new HF param back
    def check_and_map_params(hf_param, gluon_param):
        shape_hf = hf_param.shape
        gluon_param = to_torch(params[gluon_param])
        shape_gluon = gluon_param.shape
        assert (
            shape_hf == shape_gluon
        ), f"The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"
        return gluon_param
    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight, "word_embed.0.weight"
    )
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight, "encoder.position_weight"
    )
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias, "encoder.layer_norm.beta"
    )
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight, "encoder.layer_norm.gamma"
    )
    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data
    )
    for i in range(hf_bort_config.num_hidden_layers):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]

        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.bias"
        )
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.weight"
        )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.bias"
        )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.weight"
        )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.bias"
        )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.weight"
        )

        # self attention output
        self_output: BertSelfOutput = layer.attention.output
        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias, f"encoder.transformer_cells.{i}.proj.bias"
        )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight, f"encoder.transformer_cells.{i}.proj.weight"
        )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.layer_norm.beta"
        )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.layer_norm.gamma"
        )

        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_1.bias"
        )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_1.weight"
        )

        # output
        bert_output: BertOutput = layer.output
        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_2.bias"
        )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_2.weight"
        )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.ffn.layer_norm.beta"
        )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.ffn.layer_norm.gamma"
        )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained("roberta-base")
    input_ids = tokenizer.encode_plus(SAMPLE_TEXT)["input_ids"]

    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids])
    output_gluon = original_bort(inputs=gluon_input_ids, token_types=[])

    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path)
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
    hf_bort_model.eval()

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT, return_tensors="pt")
    output_hf = hf_bort_model(**input_ids)[0]

    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()
    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
    success = np.allclose(gluon_layer, hf_layer, atol=1e-3)

    if success:
        print("✔️ Both models output the same tensors")
    else:
        print("❌ The models do **NOT** output the same tensors")
        print("Absolute difference is:", max_absolute_diff)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--bort_checkpoint_path""", default=None, type=str, required=True, help="""Path the official Bort params file."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
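# Example invocation (paths are placeholders; assumes this script is saved as
# convert_bort_original_gluonnlp_checkpoint_to_pytorch.py):
#   python convert_bort_original_gluonnlp_checkpoint_to_pytorch.py \
#       --bort_checkpoint_path ./bort.params \
#       --pytorch_dump_folder_path ./bort-pytorch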
| 146
|
from math import ceil
def assert_device_map(device_map, num_blocks):
    """Sanity-check a device map: every attention block must appear exactly once."""
    blocks = list(range(0, num_blocks))
    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]

    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)

    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]

    if len(duplicate_blocks) != 0:
        raise ValueError(
            "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
            " These attention blocks were specified more than once: " + str(duplicate_blocks)
        )
    if len(missing_blocks) != 0:
        raise ValueError(
            "There are attention blocks for this model that are not specified in the device_map. Add these attention "
            "blocks to a device on the device_map: " + str(missing_blocks)
        )
    if len(extra_blocks) != 0:
        raise ValueError(
            "The device_map contains more attention blocks than this model has. Remove these from the device_map:"
            + str(extra_blocks)
        )


def get_device_map(n_layers, devices):
    """Returns a dictionary of layers distributed evenly across all devices."""
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]
    return dict(zip(devices, layers_list))
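# Quick sanity check of the helpers above (toy numbers, not tied to any model):
if __name__ == "__main__":
    device_map = get_device_map(8, devices=[0, 1, 2])
    print(device_map)  # {0: [0, 1, 2], 1: [3, 4, 5], 2: [6, 7]}
    assert_device_map(device_map, 8)  # raises if blocks are duplicated/missing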
| 146
| 1
|
# Algorithm for the pigeonhole sorting
def pigeonhole_sort(a):
    """Sort the list `a` in place using pigeonhole sort (integers only)."""
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value
    size = max_val - min_val + 1  # size is difference of max and min values plus one

    # list of pigeonholes of size equal to the variable size
    holes = [0] * size

    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1

    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1


def main():
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print("Sorted order is:", " ".join(str(x) for x in a))
if __name__ == "__main__":
main()
| 588
|
import argparse
import os
import re
PATH_TO_TRANSFORMERS = "src/transformers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")
def get_indent(line):
    """Returns the indent in `line`."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """Split `code` into its indented blocks, starting at `indent_level`, optionally between two prompts."""
    # Let's split the code into lines and move to start_index.
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks
def ignore_underscore(key):
    """Wrap a key function so that sorting ignores case and underscores."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner
def sort_objects(objects, key=None):
    """Sort a list of `objects` following the rules of isort: constants, then classes, then functions."""

    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
def sort_objects_in_import(import_statement):
    """Return the same `import_statement` but with objects properly sorted."""

    # This inner function sort imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
def sort_imports(file, check_only=True):
    """Sort `_import_structure` imports in `file`; if `check_only=True`, just check if the file needs sorting."""
    with open(file, encoding="utf-8") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reorderded_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reorderded_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reorderded_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w", encoding="utf-8") as f:
                f.write("\n".join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures.append(os.path.join(root, "__init__.py"))
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
    args = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 588
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_upernet": ["UperNetConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_upernet"] = [
        "UperNetForSemanticSegmentation",
        "UperNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 563
|
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
__lowercase = """\
@inproceedings{wang2019glue,
title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
note={In the Proceedings of ICLR.},
year={2019}
}
"""
__lowercase = """\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
"""
__lowercase = """
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
predictions: list of predictions to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
Returns: depending on the GLUE subset, one or several of:
\"accuracy\": Accuracy
\"f1\": F1 score
\"pearson\": Pearson Correlation
\"spearmanr\": Spearman Correlation
\"matthews_correlation\": Matthew Correlation
Examples:
>>> glue_metric = datasets.load_metric('glue', 'sst2') # 'sst2' or any of [\"mnli\", \"mnli_mismatched\", \"mnli_matched\", \"qnli\", \"rte\", \"wnli\", \"hans\"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> glue_metric = datasets.load_metric('glue', 'mrpc') # 'mrpc' or 'qqp'
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> glue_metric = datasets.load_metric('glue', 'stsb')
>>> references = [0., 1., 2., 3., 4., 5.]
>>> predictions = [0., 1., 2., 3., 4., 5.]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print({\"pearson\": round(results[\"pearson\"], 2), \"spearmanr\": round(results[\"spearmanr\"], 2)})
{'pearson': 1.0, 'spearmanr': 1.0}
>>> glue_metric = datasets.load_metric('glue', 'cola')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'matthews_correlation': 1.0}
"""
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def pearson_and_spearman(preds, labels):
    pearson_corr = float(pearsonr(preds, labels)[0])
    spearman_corr = float(spearmanr(preds, labels)[0])
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION,_KWARGS_DESCRIPTION )
class Glue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["sst2", "mnli", "mnli_mismatched", "mnli_matched", '''
'''"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ),
'''references''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ),
} ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' , )
    def _compute(self, predictions, references):
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions, references)
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions, references)}
else:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["sst2", "mnli", "mnli_mismatched", "mnli_matched", '''
'''"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]''' )
| 563
| 1
|
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    """Output class for text-to-video pipelines: a list/tensor of denoised frames."""

    frames: Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 19
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/mobilenet_v1_1.0_224": "https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json",
    "google/mobilenet_v1_0.75_192": "https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class MobileNetV1Config(PretrainedConfig):
    model_type = "mobilenet_v1"

    def __init__(self, num_channels=3, image_size=224, depth_multiplier=1.0, min_depth=8, hidden_act="relu6", tf_padding=True, classifier_dropout_prob=0.999, initializer_range=0.02, layer_norm_eps=0.001, **kwargs):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
class MobileNetV1OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
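# A minimal usage sketch via the public API (assumes `transformers` is
# installed; this internal module is not meant to be run directly):
#
#     from transformers import MobileNetV1Config, MobileNetV1Model
#     config = MobileNetV1Config(depth_multiplier=0.75, image_size=192)
#     model = MobileNetV1Model(config)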
| 624
| 0
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
    CamembertTokenizer = None
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    },
    "tokenizer_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"
class CamembertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CamembertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"], **kwargs):
        # The mask token behaves like a normal word, i.e. it includes the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build model inputs by adding the <s>...</s> special tokens around the sequence(s)."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Return a mask of zeros: CamemBERT, like RoBERTa, does not use token type ids."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
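# Typical usage via the public API (downloads pretrained files on first call):
#
#     from transformers import CamembertTokenizerFast
#     tokenizer = CamembertTokenizerFast.from_pretrained("camembert-base")
#     input_ids = tokenizer("J'aime le camembert !")["input_ids"]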
| 707
|
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
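# A hypothetical concrete command, sketched to show the intended contract;
# `HelloCommand` is illustrative and not part of the library.
class HelloCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        # A real command registers its own sub-parser and arguments here.
        parser.add_argument("--name", type=str, default="world")

    def run(self):
        print("hello")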
| 495
| 0
|
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_lowerCamelCase = """\
@misc{wu2016googles,
title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
_lowerCamelCase = """\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""
_lowerCamelCase = """\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
'google_bleu': google_bleu score
Examples:
Example 1:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.44
Example 2:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.61
Example 3:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results[\"google_bleu\"], 2))
0.53
Example 4:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results[\"google_bleu\"], 2))
0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(self, predictions, references, min_len=1, max_len=4):
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
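# Tiny sanity check against NLTK directly (toy token lists; assumes `nltk`
# and `datasets` are installed, since both are imported above):
if __name__ == "__main__":
    hyp = ["the", "cat", "sat"]
    ref = ["the", "cat", "sat", "down"]
    print(gleu_score.corpus_gleu(list_of_references=[[ref]], hypotheses=[hyp]))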
| 71
|
"""simple docstring"""
def A_ ( lowercase , lowercase ) -> int:
"""simple docstring"""
return number | (1 << position)
def A_ ( lowercase , lowercase ) -> int:
"""simple docstring"""
return number & ~(1 << position)
def A_ ( lowercase , lowercase ) -> int:
"""simple docstring"""
return number ^ (1 << position)
def A_ ( lowercase , lowercase ) -> bool:
"""simple docstring"""
return ((number >> position) & 1) == 1
def A_ ( lowercase , lowercase ) -> int:
"""simple docstring"""
return int((number & (1 << position)) != 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 470
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_efficientnet": [
        "EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EfficientNetConfig",
        "EfficientNetOnnxConfig",
    ]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_efficientnet"] = ["EfficientNetImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientnet"] = [
        "EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EfficientNetForImageClassification",
        "EfficientNetModel",
        "EfficientNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 717
|
"""simple docstring"""
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class AbstractDatasetReader(ABC):
    def __init__(
        self,
        path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths, dict) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        pass


class AbstractDatasetInputStream(ABC):
    def __init__(
        self,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
        pass
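# Hypothetical subclass sketch showing how the ABC is meant to be used; real
# readers (e.g. the csv/json readers in `datasets.io`) build a DatasetBuilder
# here and honor `self.streaming` / `self.keep_in_memory`.
class ExampleDatasetReader(AbstractDatasetReader):
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        raise NotImplementedError("illustrative only")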
| 255
| 0
|
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line], max_length=max_length, padding="max_length" if pad_to_max_length else None, truncation=True, return_tensors=return_tensors, add_special_tokens=True, **extra_kw,
    )
def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class Seq2SeqDataset(Dataset):
    def __init__(self, tokenizer, data_dir, max_source_length, max_target_length, type_path="train", n_obs=None, src_lang=None, tgt_lang=None, prefix=""):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index) -> Dict[str, torch.Tensor]:
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to folder_path/git_log.json."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)
def lowerCamelCase__ ( __snake_case ) -> List[str]:
"""simple docstring"""
def remove_articles(__snake_case ):
return re.sub(r'''\b(a|an|the)\b''', ''' ''', a_ )
def white_space_fix(__snake_case ):
return " ".join(text.split() )
def remove_punc(__snake_case ):
_UpperCamelCase = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(__snake_case ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(a_ ) ) ) )
def lowerCamelCase__ ( __snake_case, __snake_case ) -> List[str]:
"""simple docstring"""
_UpperCamelCase = normalize_answer(a_ ).split()
_UpperCamelCase = normalize_answer(a_ ).split()
_UpperCamelCase = Counter(a_ ) & Counter(a_ )
_UpperCamelCase = sum(common.values() )
if num_same == 0:
return 0
_UpperCamelCase = 1.0 * num_same / len(a_ )
_UpperCamelCase = 1.0 * num_same / len(a_ )
_UpperCamelCase = (2 * precision * recall) / (precision + recall)
return fa
def lowerCamelCase__ ( __snake_case, __snake_case ) -> List[Any]:
"""simple docstring"""
return normalize_answer(a_ ) == normalize_answer(a_ )
def calculate_exact_match(output_lns, reference_lns) -> dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}
def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")
def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
| 19
|
"""simple docstring"""
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Blip2Processor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")
        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs,
            )
            return text_encoding
        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs,
            )
        else:
            text_encoding = None
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)
        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
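# Usage sketch for the processor above (checkpoint name and image path are
# illustrative, not taken from this file):
#
#   from PIL import Image
#   processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
#   inputs = processor(images=Image.open("cat.png"), text="a photo of", return_tensors="pt")
#   # -> encoding with "pixel_values", "input_ids" and "attention_mask"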
| 677
| 0
|
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        # value density: value gained per unit of weight
        return self.value / self.weight


def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    """Pick items greedily, best-first by key_func, while they fit in max_cost."""
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def test_greedy():
    """Doctests for the greedy helpers go here."""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
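# Usage sketch (data is illustrative): rank items by value density, fill greedily.
#
#   food = ["burger", "salad", "steak"]
#   value = [80, 30, 100]
#   weight = [40, 10, 65]
#   foods = build_menu(food, value, weight)
#   chosen, total = greedy(foods, 60.0, Things.value_weight)
#   # steak has the worst value/weight ratio here and does not fit in 60;
#   # salad (ratio 3.0) and burger (2.0) are taken -> total value 110.0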
| 582
|
import argparse
import torch
from transformers import GPT2LMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser(
description=(
"""Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"""
""" Distillation"""
)
)
parser.add_argument("""--model_type""", default="""roberta""", choices=["""roberta""", """gpt2"""])
parser.add_argument("""--model_name""", default="""roberta-large""", type=str)
parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_roberta_048131723.pth""", type=str)
parser.add_argument("""--vocab_transform""", action="""store_true""")
    args = parser.parse_args()

    if args.model_type == "roberta":
        model = RobertaForMaskedLM.from_pretrained(args.model_name)
        prefix = "roberta"
    elif args.model_type == "gpt2":
        model = GPT2LMHeadModel.from_pretrained(args.model_name)
        prefix = "transformer"

    state_dict = model.state_dict()
    compressed_sd = {}

    # Embeddings #
    if args.model_type == "gpt2":
        for param_name in ["wte.weight", "wpe.weight"]:
            compressed_sd[f"{param_name}"] = state_dict[f"{prefix}.{param_name}"]
    else:
        for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
            param_name = f"{prefix}.embeddings.{w}.weight"
            compressed_sd[param_name] = state_dict[param_name]
        for w in ["weight", "bias"]:
            param_name = f"{prefix}.embeddings.LayerNorm.{w}"
            compressed_sd[param_name] = state_dict[param_name]

    # Transformer Blocks #
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        if args.model_type == "gpt2":
            for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.h.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.h.{teacher_idx}.{layer}.{w}"
                    ]
            compressed_sd[f"{prefix}.h.{std_idx}.attn.bias"] = state_dict[f"{prefix}.h.{teacher_idx}.attn.bias"]
        else:
            for layer in [
                "attention.self.query",
                "attention.self.key",
                "attention.self.value",
                "attention.output.dense",
                "attention.output.LayerNorm",
                "intermediate.dense",
                "output.dense",
                "output.LayerNorm",
            ]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.encoder.layer.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"
                    ]
        std_idx += 1

    # Language Modeling Head ###s
    if args.model_type == "roberta":
        for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
            compressed_sd[f"{layer}"] = state_dict[f"{layer}"]
        if args.vocab_transform:
            for w in ["weight", "bias"]:
                compressed_sd[f"lm_head.dense.{w}"] = state_dict[f"lm_head.dense.{w}"]
                compressed_sd[f"lm_head.layer_norm.{w}"] = state_dict[f"lm_head.layer_norm.{w}"]
    elif args.model_type == "gpt2":
        for w in ["weight", "bias"]:
            compressed_sd[f"{prefix}.ln_f.{w}"] = state_dict[f"{prefix}.ln_f.{w}"]
        compressed_sd["lm_head.weight"] = state_dict["lm_head.weight"]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")

    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
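# Example invocation (paths are illustrative, defaults match the parser above):
#   python extract.py --model_type roberta --model_name roberta-large \
#       --dump_checkpoint serialization_dir/tf_roberta_048131723.pth --vocab_transform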
| 582
| 1
|
"""simple docstring"""
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")


class BartphoTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"])
        with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")

        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 169
|
def solution(min_total: int = 10**12) -> int:
    prev_numerator = 1
    prev_denominator = 0
    numerator = 1
    denominator = 1
while numerator <= 2 * min_total - 1:
prev_numerator += 2 * numerator
numerator += 2 * prev_numerator
prev_denominator += 2 * denominator
denominator += 2 * prev_denominator
return (denominator + 1) // 2
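# The loop walks successive solutions of the Pell-like recurrence behind
# Project Euler's "arranged probability" problem: each iteration maps one
# (numerator, denominator) convergent to the next, and (denominator + 1) // 2
# converts the final denominator into the requested blue-disc count once the
# total number of discs exceeds `min_total`.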
if __name__ == "__main__":
print(f"""{solution() = }""")
| 415
| 0
|
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
    is_bs4_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
    is_torch_bf16_cpu_available,
    is_torch_bf16_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
__a : Optional[Any] = """pytorch_model.bin"""
__a : Dict = """pytorch_model.bin.index.json"""
__a : Any = """adapter_config.json"""
__a : Union[str, Any] = """adapter_model.bin"""
__a : List[Any] = """adapter_model.safetensors"""
__a : List[Any] = """tf_model.h5"""
__a : Optional[int] = """tf_model.h5.index.json"""
__a : Optional[Any] = """model.ckpt"""
__a : List[Any] = """flax_model.msgpack"""
__a : List[str] = """flax_model.msgpack.index.json"""
__a : str = """model.safetensors"""
__a : List[str] = """model.safetensors.index.json"""
__a : Union[str, Any] = """config.json"""
__a : str = """preprocessor_config.json"""
__a : Union[str, Any] = FEATURE_EXTRACTOR_NAME
__a : Any = """generation_config.json"""
__a : int = """modelcard.json"""
__a : List[Any] = """▁"""
__a : Optional[int] = SENTENCEPIECE_UNDERLINE # Kept for backward compatibility
__a : Optional[Any] = [
[[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too.
__a : Optional[Any] = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
__a : Tuple = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def check_min_version(min_version):
    if version.parse(__version__) < version.parse(min_version):
        if "dev" in min_version:
            error_message = (
                "This example requires a source install from HuggingFace Transformers (see "
                "`https://huggingface.co/docs/transformers/installation#install-from-source`),"
            )
        else:
            error_message = f"This example requires a minimum version of {min_version},"
        error_message += f" but the version found is {__version__}.\n"
        raise ImportError(
            error_message
            + "Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other "
            "versions of HuggingFace Transformers."
        )
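# Usage sketch (version string is illustrative): example scripts call this at
# import time to fail fast on an incompatible transformers install.
#   check_min_version("4.21.0.dev0")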
| 559
|
import argparse
__a : int = """docs/source/_static/js/custom.js"""
def a_ ( __snake_case ) -> Optional[int]:
'''simple docstring'''
with open(__snake_case , encoding='utf-8' , newline='\n' ) as f:
UpperCamelCase_ = f.readlines()
UpperCamelCase_ = 0
# First let's put the right version
while not lines[index].startswith('const stableVersion =' ):
index += 1
UpperCamelCase_ = F'''const stableVersion = "v{version}"\n'''
# Then update the dictionary
while not lines[index].startswith('const versionMapping = {' ):
index += 1
# We go until the end
while not lines[index].startswith('}' ):
index += 1
# We add the new version at the end
lines[index - 1] += F''' "v{version}": "v{version}",\n'''
with open(__snake_case , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(__snake_case )
if __name__ == "__main__":
__a : Tuple = argparse.ArgumentParser()
parser.add_argument("""--version""", help="""Release version.""")
__a : str = parser.parse_args()
update_custom_js(args.version)
| 559
| 1
|
def join(separator: str, separated: list) -> str:
    joined = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception("join() accepts only strings to be joined")
        joined += word_or_phrase + separator
    return joined.strip(separator)
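# Doctest-style usage (expected values worked out by hand):
#   >>> join("", ["a", "b", "c", "d"])
#   'abcd'
#   >>> join("#", ["a", "b", "c", "d"])
#   'a#b#c#d'
#   >>> join(" ", ["You", "are", "amazing!"])
#   'You are amazing!'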
if __name__ == "__main__":
from doctest import testmod
testmod()
| 412
|
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
class Dictionary:
    """A mapping from symbols to consecutive integers"""

    def __init__(
        self,
        *,  # begin keyword-only arguments
        bos="<s>",
        pad="<pad>",
        eos="</s>",
        unk="<unk>",
        extra_special_symbols=None,
    ):
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)
    def __eq__(self, other):
        return self.indices == other.indices

    def __getitem__(self, idx):
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word

    def __len__(self):
        return len(self.symbols)

    def __contains__(self, sym):
        return sym in self.indices
    @classmethod
    def load(cls, f):
        """Load the dictionary from a text file."""
        d = cls()
        d.add_from_file(f)
        return d
    def add_symbol(self, word, n=1, overwrite=False):
        """Add a word to the dictionary."""
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx
    def _load_meta(self, lines):
        # plain fairseq dictionaries have no metadata header
        return 0
    def add_from_file(self, f):
        """Load a pre-existing dictionary from a text file and add its symbols to this instance."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(f))
            return

        lines = f.readlines()
        indices_start_line = self._load_meta(lines)
        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(" ", 1)
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(" ", 1)
                else:
                    overwrite = False
                count = int(field)
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        "Duplicate word found when loading Dictionary: '{}'. "
                        "Duplicate words can overwrite earlier ones by adding the "
                        "#fairseq:overwrite flag at the end of the corresponding row "
                        "in the dictionary file. If using the Camembert model, please "
                        "download an updated copy of the model file.".format(word)
                    )
                self.add_symbol(word, n=count, overwrite=overwrite)
            except ValueError:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'")
def rewrite_dict_keys(d):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
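# Usage sketch: rewrite_dict_keys converts fairseq-style BPE vocab keys
# ("@@" marks broken words) into the "</w>"-suffixed convention while keeping
# the special tokens intact:
#   rewrite_dict_keys({"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "le@@": 5, "er": 7})
#   -> {"le": 5, "er</w>": 7, "<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}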
def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    if not os.path.exists(biogpt_checkpoint_path):
        raise ValueError(f"path {biogpt_checkpoint_path} does not exist!")
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path, "checkpoint.pt")
    if not os.path.isfile(checkpoint_file):
        raise ValueError(f"path to the file {checkpoint_file} does not exist!")
    chkpt = torch.load(checkpoint_file, map_location="cpu")

    args = chkpt["cfg"]["model"]

    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path, "dict.txt")
    if not os.path.isfile(dict_file):
        raise ValueError(f"path to the file {dict_file} does not exist!")
    src_dict = Dictionary.load(dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["vocab_file"])
    print(f"Generating {src_vocab_file} of {src_vocab_size} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path, "bpecodes")
    if not os.path.isfile(bpecodes_file):
        raise ValueError(f"path to the file {bpecodes_file} does not exist!")

    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    shutil.copyfile(bpecodes_file, merges_file)

    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")
    model_conf = {
        "activation_dropout": args["activation_dropout"],
        "architectures": ["BioGptForCausalLM"],
        "attention_probs_dropout_prob": args["attention_dropout"],
        "bos_token_id": 0,
        "eos_token_id": 2,
        "hidden_act": args["activation_fn"],
        "hidden_dropout_prob": args["dropout"],
        "hidden_size": args["decoder_embed_dim"],
        "initializer_range": 0.02,
        "intermediate_size": args["decoder_ffn_embed_dim"],
        "layer_norm_eps": 1e-12,
        "layerdrop": args["decoder_layerdrop"],
        "max_position_embeddings": args["max_target_positions"],
        "model_type": "biogpt",
        "num_attention_heads": args["decoder_attention_heads"],
        "num_hidden_layers": args["decoder_layers"],
        "pad_token_id": 1,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_decoder_input_output_embed"],
        "vocab_size": src_vocab_size,
    }

    # good hparam defaults to start with
    print(f"Generating {biogpt_model_config_file}")
    with open(biogpt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)
    tokenizer_conf = {
        "bos_token": "<s>",
        "eos_token": "</s>",
        "model_max_length": 1024,
        "pad_token": "<pad>",
        "special_tokens_map_file": None,
        "tokenizer_class": "BioGptTokenizer",
        "unk_token": "<unk>",
    }
    print(f"Generating {biogpt_tokenizer_config_file}")
    with open(biogpt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model_state_dict = chkpt["model"]

    # remove unneeded keys
    ignore_keys = [
        "decoder.version",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    layer_names = list(model_state_dict.keys())
    for layer_name in layer_names:
        if layer_name.endswith("output_projection.weight"):
            model_state_dict["output_projection.weight"] = model_state_dict.pop(layer_name)
        else:
            model_state_dict[layer_name.replace("decoder", "biogpt")] = model_state_dict.pop(layer_name)

    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = BioGptForCausalLM(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--biogpt_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'
' bpecodes, etc.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
| 412
| 1
|
from __future__ import annotations
from collections import deque
class Automaton:
    def __init__(self, keywords: list[str]):
        self.adlist: list[dict] = []
        self.adlist.append(
            {"value": "", "next_states": [], "fail_state": 0, "output": []}
        )
        for keyword in keywords:
            self.add_keyword(keyword)
        self.set_fail_transitions()

    def find_next_state(self, current_state: int, char: str) -> int | None:
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None

    def add_keyword(self, keyword: str) -> None:
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    }
                )
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword)

    def set_fail_transitions(self) -> None:
        q: deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node)
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                state = self.adlist[r]["fail_state"]
                while (
                    self.find_next_state(state, self.adlist[child]["value"]) is None
                    and state != 0
                ):
                    state = self.adlist[state]["fail_state"]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state, self.adlist[child]["value"]
                )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                self.adlist[child]["output"] = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )

    def search_in(self, string: str) -> dict[str, list[int]]:
        """Return a dict mapping each found keyword to the list of its start offsets."""
        result: dict = {}  # returns a dict with keywords and list of its occurrences
        current_state = 0
        for i in range(len(string)):
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["fail_state"]
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
            for key in self.adlist[current_state]["output"]:
                if key not in result:
                    result[key] = []
                result[key].append(i - len(key) + 1)
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
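# Usage sketch (pattern set and text are illustrative; positions verified by hand):
#   auto = Automaton(["he", "she", "his", "hers"])
#   auto.search_in("ushers")
#   # -> {'she': [1], 'he': [2], 'hers': [2]}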
| 175
|
from typing import Dict
from .base import GenericTensor, Pipeline
class FeatureExtractionPipeline(Pipeline):
    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}
        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)"
                )
            tokenize_kwargs["truncation"] = truncation

        preprocess_params = tokenize_kwargs

        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors

        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        # [0] is the first available tensor, logits or last_hidden_state.
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
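# Usage sketch (model name is illustrative):
#   from transformers import pipeline
#   extractor = pipeline(task="feature-extraction", model="bert-base-cased")
#   features = extractor("This is a simple test.", return_tensors=True)
#   # -> tensor of shape [1, sequence_length, hidden_size]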
| 175
| 1
|
'''simple docstring'''
from pathlib import Path
import fire
from tqdm import tqdm
def _A ( lowercase__="ro" , lowercase__="en" , lowercase__="wmt16" , lowercase__=None ):
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError("""run pip install datasets""" )
lowercase__ = f'''{src_lang}-{tgt_lang}'''
print(f'''Converting {dataset}-{pair}''' )
lowercase__ = datasets.load_dataset(_lowercase , _lowercase )
if save_dir is None:
lowercase__ = f'''{dataset}-{pair}'''
lowercase__ = Path(_lowercase )
save_dir.mkdir(exist_ok=_lowercase )
for split in ds.keys():
print(f'''Splitting {split} with {ds[split].num_rows} records''' )
# to save to val.source, val.target like summary datasets
lowercase__ = """val""" if split == """validation""" else split
lowercase__ = save_dir.joinpath(f'''{fn}.source''' )
lowercase__ = save_dir.joinpath(f'''{fn}.target''' )
lowercase__ = src_path.open("""w+""" )
lowercase__ = tgt_path.open("""w+""" )
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split] ):
lowercase__ = x["""translation"""]
src_fp.write(ex[src_lang] + """\n""" )
tgt_fp.write(ex[tgt_lang] + """\n""" )
print(f'''Saved {dataset} dataset to {save_dir}''' )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
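# Example invocation via fire (arguments are illustrative):
#   python download_wmt.py --src_lang ro --tgt_lang en --dataset wmt16 --save_dir wmt16-ro-en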
| 325
|
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_module(module) -> None:
    """Freeze a module's parameters so they are excluded from backprop."""
    for param in module.parameters():
        param.requires_grad = False


def get_device() -> str:
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_image(image) -> None:
    """Display an image with the axes hidden."""
    fig = plt.imshow(image)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp() -> str:
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
| 523
| 0
|
"""simple docstring"""
import doctest
from collections import deque
import numpy as np
class CircularConvolution:
    """Compute the circular convolution of two fixed 1-D signals via a rotation matrix."""

    def __init__(self):
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution(self):
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)
        max_length = max(length_first_signal, length_second_signal)

        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]

        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)

        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item

        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))

        # rounding-off to two decimal places
        return [round(i, 2) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
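# Worked example (computed by hand for the default signals above):
# the circular convolution of [2, 1, 2, -1] with [1, 2, 3, 4] is
# [10.0, 10.0, 6.0, 14.0], e.g. y[0] = 2*1 + 1*4 + 2*3 + (-1)*2 = 10.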
| 720
|
"""simple docstring"""
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main():
    parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers")

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
| 349
| 0
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block


@dataclass
class UNet1DOutput(BaseOutput):
    sample: torch.FloatTensor


class UNet1DModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        sample_size: int = 65536,
        sample_rate: Optional[int] = None,
        in_channels: int = 2,
        out_channels: int = 2,
        extra_in_channels: int = 0,
        time_embedding_type: str = "fourier",
        flip_sin_to_cos: bool = True,
        use_timestep_embedding: bool = False,
        freq_shift: float = 0.0,
        down_block_types: Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
        up_block_types: Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        mid_block_type: str = "UNetMidBlock1D",
        out_block_type: str = None,
        block_out_channels: Tuple[int] = (32, 32, 64),
        act_fn: str = None,
        norm_num_groups: int = 8,
        layers_per_block: int = 1,
        downsample_each_block: bool = False,
    ):
        super().__init__()
        self.sample_size = sample_size

        # time
        if time_embedding_type == "fourier":
            self.time_proj = GaussianFourierProjection(
                embedding_size=8, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos
            )
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            self.time_proj = Timesteps(
                block_out_channels[0], flip_sin_to_cos=flip_sin_to_cos, downscale_freq_shift=freq_shift
            )
            timestep_input_dim = block_out_channels[0]

        if use_timestep_embedding:
            time_embed_dim = block_out_channels[0] * 4
            self.time_mlp = TimestepEmbedding(
                in_channels=timestep_input_dim, time_embed_dim=time_embed_dim, act_fn=act_fn, out_dim=block_out_channels[0],
            )

        self.down_blocks = nn.ModuleList([])
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])
        self.out_block = None

        # down
        output_channel = in_channels
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]

            if i == 0:
                input_channel += extra_in_channels

            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type, num_layers=layers_per_block, in_channels=input_channel, out_channels=output_channel, temb_channels=block_out_channels[0], add_downsample=not is_final_block or downsample_each_block,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = get_mid_block(
            mid_block_type, in_channels=block_out_channels[-1], mid_channels=block_out_channels[-1], out_channels=block_out_channels[-1], embed_dim=block_out_channels[0], num_layers=layers_per_block, add_downsample=downsample_each_block,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        if out_block_type is None:
            final_upsample_channels = out_channels
        else:
            final_upsample_channels = block_out_channels[0]

        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = (
                reversed_block_out_channels[i + 1] if i < len(up_block_types) - 1 else final_upsample_channels
            )

            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type, num_layers=layers_per_block, in_channels=prev_output_channel, out_channels=output_channel, temb_channels=block_out_channels[0], add_upsample=not is_final_block,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32)
        self.out_block = get_out_block(
            out_block_type=out_block_type, num_groups_out=num_groups_out, embed_dim=block_out_channels[0], out_channels=out_channels, act_fn=act_fn, fc_dim=block_out_channels[-1] // 4,
        )

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        return_dict: bool = True,
    ) -> Union[UNet1DOutput, Tuple]:
        # 1. time
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)

        timestep_embed = self.time_proj(timesteps)
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed)
        else:
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))

        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample, res_samples = downsample_block(hidden_states=sample, temb=timestep_embed)
            down_block_res_samples += res_samples

        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample, timestep_embed)

        # 4. up
        for i, upsample_block in enumerate(self.up_blocks):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample, res_hidden_states_tuple=res_samples, temb=timestep_embed)

        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample, timestep_embed)

        if not return_dict:
            return (sample,)

        return UNet1DOutput(sample=sample)
| 437
|
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    def __init__(
        self,
        parent,
        out_indices=None,
        out_features=None,
        stage_names=None,
        backbone="resnet50",
        batch_size=3,
        image_size=32,
        num_channels=3,
        use_pretrained_backbone=True,
        is_training=True,
    ):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size, num_channels=self.num_channels, out_features=self.out_features, out_indices=self.out_indices, stage_names=self.stage_names, use_pretrained_backbone=self.use_pretrained_backbone, backbone=self.backbone,
        )

    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_map[-1].shape, (self.batch_size, model.channels[-1], 14, 14),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PretrainedConfig, has_text_modality=False)
    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = "resnet18"
        transformers_checkpoint = "microsoft/resnet-18"

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)

        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(len(timm_model.stage_names), len(transformers_model.stage_names))
        self.assertEqual(timm_model.channels, transformers_model.channels)
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices, (-1,))
        self.assertEqual(transformers_model.out_indices, [len(timm_model.stage_names) - 1])

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])

        self.assertEqual(timm_model.out_indices, transformers_model.out_indices)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(timm_model.channels, transformers_model.channels)
@unittest.skip("""TimmBackbone doesn't support feed forward chunking""" )
def __snake_case ( self :Tuple ) ->Optional[int]:
pass
@unittest.skip("""TimmBackbone doesn't have num_hidden_layers attribute""" )
def __snake_case ( self :Optional[Any] ) ->int:
pass
@unittest.skip("""TimmBackbone initialization is managed on the timm side""" )
def __snake_case ( self :Optional[Any] ) ->Optional[Any]:
pass
@unittest.skip("""TimmBackbone models doesn't have inputs_embeds""" )
def __snake_case ( self :int ) ->List[Any]:
pass
@unittest.skip("""TimmBackbone models doesn't have inputs_embeds""" )
def __snake_case ( self :int ) ->Tuple:
pass
@unittest.skip("""TimmBackbone model cannot be created without specifying a backbone checkpoint""" )
def __snake_case ( self :List[Any] ) ->List[str]:
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def __snake_case ( self :int ) ->Any:
pass
@unittest.skip("""model weights aren't tied in TimmBackbone.""" )
def __snake_case ( self :int ) ->int:
pass
@unittest.skip("""model weights aren't tied in TimmBackbone.""" )
def __snake_case ( self :str ) ->Union[str, Any]:
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def __snake_case ( self :int ) ->Optional[int]:
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def __snake_case ( self :List[Any] ) ->Optional[Any]:
pass
@unittest.skip("""TimmBackbone doesn't have hidden size info in its configuration.""" )
def __snake_case ( self :Union[str, Any] ) ->List[Any]:
pass
@unittest.skip("""TimmBackbone doesn't support output_attentions.""" )
def __snake_case ( self :Tuple ) ->List[Any]:
pass
@unittest.skip("""Safetensors is not supported by timm.""" )
def __snake_case ( self :List[Any] ) ->int:
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def __snake_case ( self :Optional[Any] ) ->Optional[int]:
pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs[0][-1]

        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()

        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()

        output.flatten()[0].backward(retain_graph=True)

        self.assertIsNotNone(hidden_states.grad)

        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)

    def test_create_from_modified_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))

            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)

            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
| 264
| 0
|
class Graph:
    """A directed graph stored as an adjacency dict, with recursive DFS."""

    def __init__(self):
        self.vertex = {}

    def print_graph(self):
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex, to_vertex):
        # check if vertex is already present,
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self):
        # visited array for storing already visited nodes
        visited = [False] * len(self.vertex)

        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex, visited):
        # mark start vertex as visited
        visited[start_vertex] = True

        print(start_vertex, end=" ")

        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i, visited)
if __name__ == "__main__":
    g = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print('''DFS:''')
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
| 711
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CVT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
    # See all Cvt models at https://huggingface.co/models?filter=cvt
}


class CvtConfig(PretrainedConfig):
    model_type = "cvt"

    def __init__(
        self,
        num_channels=3,
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
| 73
| 0
|
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
"""stable diffusion controlnet""",
"""0.22.0""",
"""Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.""",
standard_warn=False,
stacklevel=3,
)
| 71
|
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65072, 0.58492, 0.48219, 0.55521, 0.53180, 0.55939, 0.50697, 0.39800, 0.46455])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65863, 0.59425, 0.49326, 0.56313, 0.53875, 0.56627, 0.51065, 0.39777, 0.46330])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53817, 0.60812, 0.47384, 0.49530, 0.51894, 0.49814, 0.47984, 0.38958, 0.44271])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53895, 0.60808, 0.47933, 0.49608, 0.51886, 0.49950, 0.48053, 0.38957, 0.44200])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_prompt_embeds(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''')
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        inputs['''prompt'''] = 3 * [inputs['''prompt''']]
        # forward
        output = pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]
        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop('''prompt''')]
        text_inputs = pipe.tokenizer(
            prompt , padding='''max_length''' , max_length=pipe.tokenizer.model_max_length , truncation=True , return_tensors='''np''' , )
        text_inputs = text_inputs['''input_ids''']
        prompt_embeds = pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0]
        inputs['''prompt_embeds'''] = prompt_embeds
        # forward
        output = pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
    def test_stable_diffusion_negative_prompt_embeds(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''')
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        negative_prompt = 3 * ['''this is a negative prompt''']
        inputs['''negative_prompt'''] = negative_prompt
        inputs['''prompt'''] = 3 * [inputs['''prompt''']]
        # forward
        output = pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]
        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop('''prompt''')]
        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = pipe.tokenizer(
                p , padding='''max_length''' , max_length=pipe.tokenizer.model_max_length , truncation=True , return_tensors='''np''' , )
            text_inputs = text_inputs['''input_ids''']
            embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0])
        inputs['''prompt_embeds'''], inputs['''negative_prompt_embeds'''] = embeds
        # forward
        output = pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionPipelineNightlyTests(unittest.TestCase ):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000", # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )
    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm(self):
        # using the PNDM scheduler by default
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            '''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = '''A painting of a squirrel eating a burger'''
        np.random.seed(0)
        output = sd_pipe([prompt] , guidance_scale=6.0 , num_inference_steps=10 , output_type='''np''')
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0452, 0.0390, 0.0087, 0.0350, 0.0617, 0.0364, 0.0544, 0.0523, 0.0720])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_inference_ddim(self):
        ddim_scheduler = DDIMScheduler.from_pretrained(
            '''runwayml/stable-diffusion-v1-5''' , subfolder='''scheduler''' , revision='''onnx''')
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            '''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , scheduler=ddim_scheduler , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = '''open neural network exchange'''
        generator = np.random.RandomState(0)
        output = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=generator , output_type='''np''')
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2867, 0.1974, 0.1481, 0.7294, 0.7251, 0.6667, 0.4194, 0.5642, 0.6486])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_inference_k_lms(self):
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            '''runwayml/stable-diffusion-v1-5''' , subfolder='''scheduler''' , revision='''onnx''')
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            '''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , scheduler=lms_scheduler , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = '''open neural network exchange'''
        generator = np.random.RandomState(0)
        output = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=generator , output_type='''np''')
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2306, 0.1959, 0.1593, 0.6549, 0.6394, 0.5408, 0.5065, 0.6010, 0.6161])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_intermediate_state(self):
        number_of_steps = 0
        def test_callback_fn(step , timestep , latents) -> None:
            test_callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 0:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.6772, -0.3835, -1.2456, 0.1905, -1.0974, 0.6967, -1.9353, 0.0178, 1.0167])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3
            elif step == 5:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.3351, 0.2241, -0.1837, -0.2325, -0.6577, 0.3393, -0.0241, 0.5899, 1.3875])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3
        test_callback_fn.has_been_called = False
        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            '''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None)
        prompt = '''Andromeda galaxy in a bottle'''
        generator = np.random.RandomState(0)
        pipe(
            prompt=prompt , num_inference_steps=5 , guidance_scale=7.5 , generator=generator , callback=test_callback_fn , callback_steps=1 , )
assert test_callback_fn.has_been_called
assert number_of_steps == 6
    def test_stable_diffusion_no_safety_checker(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            '''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        assert isinstance(pipe , OnnxStableDiffusionPipeline)
        assert pipe.safety_checker is None
        image = pipe('''example prompt''' , num_inference_steps=2).images[0]
        assert image is not None
        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = OnnxStableDiffusionPipeline.from_pretrained(tmpdirname)
        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe('''example prompt''' , num_inference_steps=2).images[0]
        assert image is not None
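# A minimal sketch of the callback mechanism exercised in test_intermediate_state
# above: the pipeline invokes `callback(step, timestep, latents)` every
# `callback_steps` denoising steps. The function below only logs and is purely
# illustrative; it is not part of the original test suite.
def log_latents_callback(step, timestep, latents) -> None:
    print(f"step {step} (timestep {timestep}): latents mean = {latents.mean():.4f}")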
'''simple docstring'''
from __future__ import annotations
import time
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0s are free paths whereas 1s are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
class Node:
    """simple docstring"""
    def __init__( self , pos_x , pos_y , goal_x , goal_y , parent ) -> None:
        '''simple docstring'''
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent
class BreadthFirstSearch:
    """simple docstring"""
    def __init__( self , start , goal ) -> None:
        '''simple docstring'''
        self.start = Node(start[1] , start[0] , goal[1] , goal[0] , None )
        self.target = Node(goal[1] , goal[0] , goal[1] , goal[0] , None )
        self.node_queue = [self.start]
        self.reached = False
    def search( self ) -> Path | None:
        '''simple docstring'''
        while self.node_queue:
            current_node = self.node_queue.pop(0 )
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node )
            successors = self.get_successors(current_node )
            for node in successors:
                self.node_queue.append(node )
        if not self.reached:
            return [self.start.pos]
        return None
    def get_successors( self , parent ) -> list[Node]:
        '''simple docstring'''
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(grid ) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x , pos_y , self.target.pos_y , self.target.pos_x , parent ) )
        return successors
    def retrace_path( self , node ) -> Path:
        '''simple docstring'''
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x) )
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalBreadthFirstSearch:
    """simple docstring"""
    def __init__( self , start , goal ) -> None:
        '''simple docstring'''
        self.fwd_bfs = BreadthFirstSearch(start , goal )
        self.bwd_bfs = BreadthFirstSearch(goal , start )
        self.reached = False
    def search( self ) -> Path | None:
        '''simple docstring'''
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0 )
            current_bwd_node = self.bwd_bfs.node_queue.pop(0 )
            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(
                    current_fwd_node , current_bwd_node )
            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node
            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node ),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node ),
            }
            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node )
        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None
    def retrace_bidirectional_path( self , fwd_node , bwd_node ) -> Path:
        '''simple docstring'''
        fwd_path = self.fwd_bfs.retrace_path(fwd_node )
        bwd_path = self.bwd_bfs.retrace_path(bwd_node )
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
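# Worked example of the bidirectional retrace above (hypothetical coordinates):
# if the forward search retraced [(0, 0), (1, 1), (2, 2)] and the backward
# search retraced [(4, 4), (3, 3), (2, 2)], popping the shared meeting node
# (2, 2) off the backward path and reversing it yields the full path
# [(0, 0), (1, 1), (2, 2), (3, 3), (4, 4)].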
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)
    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    path = bfs.search()
    bfs_time = time.time() - start_bfs_time
    print('Unidirectional BFS computation time : ', bfs_time)
    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time
    print('Bidirectional BFS computation time : ', bd_bfs_time)
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCAmelCase_ = {
'configuration_roberta_prelayernorm': [
'ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP',
'RobertaPreLayerNormConfig',
'RobertaPreLayerNormOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST',
'RobertaPreLayerNormForCausalLM',
'RobertaPreLayerNormForMaskedLM',
'RobertaPreLayerNormForMultipleChoice',
'RobertaPreLayerNormForQuestionAnswering',
'RobertaPreLayerNormForSequenceClassification',
'RobertaPreLayerNormForTokenClassification',
'RobertaPreLayerNormModel',
'RobertaPreLayerNormPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRobertaPreLayerNormForCausalLM',
'TFRobertaPreLayerNormForMaskedLM',
'TFRobertaPreLayerNormForMultipleChoice',
'TFRobertaPreLayerNormForQuestionAnswering',
'TFRobertaPreLayerNormForSequenceClassification',
'TFRobertaPreLayerNormForTokenClassification',
'TFRobertaPreLayerNormMainLayer',
'TFRobertaPreLayerNormModel',
'TFRobertaPreLayerNormPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'FlaxRobertaPreLayerNormForCausalLM',
'FlaxRobertaPreLayerNormForMaskedLM',
'FlaxRobertaPreLayerNormForMultipleChoice',
'FlaxRobertaPreLayerNormForQuestionAnswering',
'FlaxRobertaPreLayerNormForSequenceClassification',
'FlaxRobertaPreLayerNormForTokenClassification',
'FlaxRobertaPreLayerNormModel',
'FlaxRobertaPreLayerNormPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
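# A minimal usage sketch of the lazy-module pattern above: importing the
# package stays cheap, and the first attribute access triggers the real
# submodule import. The import path below assumes the usual transformers
# layout for this model.
#
# from transformers.models.roberta_prelayernorm import RobertaPreLayerNormConfig
# config = RobertaPreLayerNormConfig()  # first access loads configuration_roberta_prelayernorm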
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
A_: int = logging.get_logger(__name__)
class _lowercase ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowerCAmelCase__ = ['pixel_values']
def __init__( self , UpperCAmelCase = True , UpperCAmelCase = None , UpperCAmelCase = PILImageResampling.BILINEAR , UpperCAmelCase = True , UpperCAmelCase = None , UpperCAmelCase = True , UpperCAmelCase = 1 / 255 , UpperCAmelCase = True , UpperCAmelCase = None , UpperCAmelCase = None , **UpperCAmelCase , ):
'''simple docstring'''
super().__init__(**UpperCamelCase_ )
_lowercase = size if size is not None else {'shortest_edge': 256}
_lowercase = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ )
_lowercase = crop_size if crop_size is not None else {'height': 224, 'width': 224}
_lowercase = get_size_dict(UpperCamelCase_ , param_name="""crop_size""" )
_lowercase = do_resize
_lowercase = size
_lowercase = resample
_lowercase = do_center_crop
_lowercase = crop_size
_lowercase = do_rescale
_lowercase = rescale_factor
_lowercase = do_normalize
_lowercase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_lowercase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = PILImageResampling.BICUBIC , UpperCAmelCase = None , **UpperCAmelCase , ):
'''simple docstring'''
_lowercase = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ )
if "shortest_edge" not in size:
raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
_lowercase = get_resize_output_image_size(UpperCamelCase_ , size=size["""shortest_edge"""] , default_to_square=UpperCamelCase_ )
return resize(UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def _UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None , **UpperCAmelCase , ):
'''simple docstring'''
_lowercase = get_size_dict(UpperCamelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(F'''The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}''' )
return center_crop(UpperCamelCase_ , size=(size["""height"""], size["""width"""]) , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def _UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None , **UpperCAmelCase ):
'''simple docstring'''
return rescale(UpperCamelCase_ , scale=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def _UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None , **UpperCAmelCase , ):
'''simple docstring'''
return normalize(UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def _UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = ChannelDimension.FIRST , **UpperCAmelCase , ):
'''simple docstring'''
_lowercase = do_resize if do_resize is not None else self.do_resize
_lowercase = size if size is not None else self.size
_lowercase = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ )
_lowercase = resample if resample is not None else self.resample
_lowercase = do_center_crop if do_center_crop is not None else self.do_center_crop
_lowercase = crop_size if crop_size is not None else self.crop_size
_lowercase = get_size_dict(UpperCamelCase_ , param_name="""crop_size""" )
_lowercase = do_rescale if do_rescale is not None else self.do_rescale
_lowercase = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowercase = do_normalize if do_normalize is not None else self.do_normalize
_lowercase = image_mean if image_mean is not None else self.image_mean
_lowercase = image_std if image_std is not None else self.image_std
_lowercase = make_list_of_images(UpperCamelCase_ )
if not valid_images(UpperCamelCase_ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
_lowercase = [to_numpy_array(UpperCamelCase_ ) for image in images]
if do_resize:
_lowercase = [self.resize(image=UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ ) for image in images]
if do_center_crop:
_lowercase = [self.center_crop(image=UpperCamelCase_ , size=UpperCamelCase_ ) for image in images]
if do_rescale:
_lowercase = [self.rescale(image=UpperCamelCase_ , scale=UpperCamelCase_ ) for image in images]
if do_normalize:
_lowercase = [self.normalize(image=UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ ) for image in images]
_lowercase = [to_channel_dimension_format(UpperCamelCase_ , UpperCamelCase_ ) for image in images]
_lowercase = {'pixel_values': images}
return BatchFeature(data=UpperCamelCase_ , tensor_type=UpperCamelCase_ )
def _UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase = None ):
'''simple docstring'''
_lowercase = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(UpperCamelCase_ ) != len(UpperCamelCase_ ):
raise ValueError(
"""Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
if is_torch_tensor(UpperCamelCase_ ):
_lowercase = target_sizes.numpy()
_lowercase = []
for idx in range(len(UpperCamelCase_ ) ):
_lowercase = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="""bilinear""" , align_corners=UpperCamelCase_ )
_lowercase = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(UpperCamelCase_ )
else:
_lowercase = logits.argmax(dim=1 )
_lowercase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
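# A minimal usage sketch of the intended API above. In the un-obfuscated
# original the methods are named `resize`, `center_crop`, `rescale`,
# `normalize`, and `preprocess` (here they all collapsed to `_UpperCAmelCase`),
# and the class name below is hypothetical:
#
# import numpy as np
# processor = SomeImageProcessor(size={"shortest_edge": 256}, crop_size={"height": 224, "width": 224})
# dummy = (np.random.rand(480, 640, 3) * 255).astype(np.uint8)
# batch = processor.preprocess(dummy, return_tensors="np")
# print(batch["pixel_values"].shape)  # expected: (1, 3, 224, 224)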
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
    PegasusTokenizer = None
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '''▁'''
VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {'''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'''},
    '''tokenizer_file''': {
        '''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json'''
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''google/pegasus-xsum''': 512,
}
class PegasusTokenizerFast(PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ['input_ids', 'attention_mask']
    def __init__( self , vocab_file=None , tokenizer_file=None , pad_token="<pad>" , eos_token="</s>" , unk_token="<unk>" , mask_token="<mask_2>" , mask_token_sent="<mask_1>" , additional_special_tokens=None , offset=103 , **kwargs , ) -> None:
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens , list ):
                raise TypeError(
                    f'''additional_special_tokens should be of type {type(list )}, but is'''
                    f''' {type(additional_special_tokens )}''' )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f'''<unk_{i}>''' for i in range(len(additional_special_tokens_extended ) , self.offset - 1 )
            ]
            if len(set(additional_special_tokens_extended ) ) != len(additional_special_tokens_extended ):
                raise ValueError(
                    'Please make sure that the provided additional_special_tokens do not contain an incorrectly'
                    f''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''' )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f'''<unk_{i}>''' for i in range(2 , self.offset )]
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , pad_token=pad_token , eos_token=eos_token , unk_token=unk_token , mask_token=mask_token , mask_token_sent=mask_token_sent , offset=offset , additional_special_tokens=additional_special_tokens , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def _special_token_mask( self , seq ) -> List[int]:
        all_special_ids = set(self.all_special_ids ) # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
        if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
            raise ValueError(
                'There should be 3 special tokens: mask_token, pad_token, and eos_token +'
                f''' {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}''' )
        return [1 if x in all_special_ids else 0 for x in seq]
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1=None , already_has_special_tokens=False ) -> List[int]:
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0 )
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0 ) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1 ) + [1]
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]
    def save_vocabulary( self , save_directory , filename_prefix=None ) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.' )
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
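# A minimal usage sketch (the checkpoint name comes from the maps above; this
# is illustrative, not part of the module): `build_inputs_with_special_tokens`
# only appends the EOS token, so encoded sequences end with `eos_token_id`.
#
# tok = PegasusTokenizerFast.from_pretrained("google/pegasus-xsum")
# ids = tok("Summarize this sentence.").input_ids
# assert ids[-1] == tok.eos_token_id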
import requests
from bs4 import BeautifulSoup
def stock_price(symbol: str = "AAPL") -> str:
    url = F"""https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"""
    soup = BeautifulSoup(requests.get(url).text , "html.parser" )
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div" , class_=class_ ).find("span" ).text
if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(F'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def info_command_factory(_ ):
    return EnvironmentCommand()
def download_command_factory(args ):
    return EnvironmentCommand(args.accelerate_config_file )
class EnvironmentCommand(BaseTransformersCLICommand ):
    @staticmethod
    def register_subcommand(parser ):
        download_parser = parser.add_parser("env" )
        download_parser.set_defaults(func=info_command_factory )
        download_parser.add_argument(
            "--accelerate-config_file" , default=None , help="The accelerate config file to use for the default values in the launching script." , )
        download_parser.set_defaults(func=download_command_factory )
    def __init__( self , accelerate_config_file , *args ):
        self._accelerate_config_file = accelerate_config_file
    def run( self ):
snake_case__ = "not installed"
if is_safetensors_available():
import safetensors
snake_case__ = safetensors.__version__
elif importlib.util.find_spec("safetensors" ) is not None:
import safetensors
snake_case__ = F"""{safetensors.__version__} but is ignored because of PyTorch version too old."""
snake_case__ = "not installed"
snake_case__ = snake_case__ = "not found"
if is_accelerate_available():
import accelerate
from accelerate.commands.config import default_config_file, load_config_from_file
snake_case__ = accelerate.__version__
# Get the default from the config file.
if self._accelerate_config_file is not None or os.path.isfile(lowerCamelCase ):
snake_case__ = load_config_from_file(self._accelerate_config_file ).to_dict()
snake_case__ = (
"\n".join([F"""\t- {prop}: {val}""" for prop, val in accelerate_config.items()] )
if isinstance(lowerCamelCase , lowerCamelCase )
else F"""\t{accelerate_config}"""
)
snake_case__ = "not installed"
snake_case__ = "NA"
if is_torch_available():
import torch
snake_case__ = torch.__version__
snake_case__ = torch.cuda.is_available()
snake_case__ = "not installed"
snake_case__ = "NA"
if is_tf_available():
import tensorflow as tf
snake_case__ = tf.__version__
try:
# deprecated in v2.1
snake_case__ = tf.test.is_gpu_available()
except AttributeError:
# returns list of devices, convert to bool
snake_case__ = bool(tf.config.list_physical_devices("GPU" ) )
snake_case__ = "not installed"
snake_case__ = "not installed"
snake_case__ = "not installed"
snake_case__ = "NA"
if is_flax_available():
import flax
import jax
import jaxlib
snake_case__ = flax.__version__
snake_case__ = jax.__version__
snake_case__ = jaxlib.__version__
snake_case__ = jax.lib.xla_bridge.get_backend().platform
snake_case__ = {
"`transformers` version": version,
"Platform": platform.platform(),
"Python version": platform.python_version(),
"Huggingface_hub version": huggingface_hub.__version__,
"Safetensors version": F"""{safetensors_version}""",
"Accelerate version": F"""{accelerate_version}""",
"Accelerate config": F"""{accelerate_config_str}""",
"PyTorch version (GPU?)": F"""{pt_version} ({pt_cuda_available})""",
"Tensorflow version (GPU?)": F"""{tf_version} ({tf_cuda_available})""",
"Flax version (CPU?/GPU?/TPU?)": F"""{flax_version} ({jax_backend})""",
"Jax version": F"""{jax_version}""",
"JaxLib version": F"""{jaxlib_version}""",
"Using GPU in script?": "<fill in>",
"Using distributed or parallel set-up in script?": "<fill in>",
}
print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n" )
print(self.format_dict(lowerCamelCase ) )
return info
    @staticmethod
    def format_dict(d ):
        return "\n".join([F"""- {prop}: {val}""" for prop, val in d.items()] ) + "\n"
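# In the transformers CLI this subcommand is reachable as `transformers-cli env`.
# A minimal sketch of how the registration above wires argparse to the command
# (kept commented out because it would execute at import time):
#
# from argparse import ArgumentParser
# parser = ArgumentParser("transformers-cli")
# commands_parser = parser.add_subparsers(help="transformers-cli command helpers")
# EnvironmentCommand.register_subcommand(commands_parser)
# args = parser.parse_args(["env"])
# args.func(args).run()  # prints the environment report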
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
'microsoft/swinv2-tiny-patch4-window8-256': (
'https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json'
),
}
class _a ( UpperCamelCase__ ):
_lowercase : List[Any] = '''swinv2'''
_lowercase : Tuple = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
    def __init__( self , image_size=224 , patch_size=4 , num_channels=3 , embed_dim=96 , depths=[2, 2, 6, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , mlp_ratio=4.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , initializer_range=0.02 , layer_norm_eps=1E-5 , encoder_stride=32 , **kwargs , ) -> None:
        """simple docstring"""
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
        self.pretrained_window_sizes = (0, 0, 0, 0)
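# Worked example of the hidden_size derivation above, using this __init__'s
# defaults: embed_dim=96 and depths=[2, 2, 6, 2], so
# hidden_size = int(96 * 2 ** (len(depths) - 1)) = int(96 * 8) = 768,
# i.e. the channel dimension after the last of the four stages.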
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _a :
def __init__( self: Tuple , UpperCamelCase_: int , UpperCamelCase_: Optional[Any]=13 , UpperCamelCase_: Any=30 , UpperCamelCase_: Union[str, Any]=2 , UpperCamelCase_: Tuple=3 , UpperCamelCase_: Optional[Any]=True , UpperCamelCase_: Tuple=True , UpperCamelCase_: List[Any]=32 , UpperCamelCase_: int=2 , UpperCamelCase_: List[str]=4 , UpperCamelCase_: Optional[int]=37 , UpperCamelCase_: int="gelu" , UpperCamelCase_: Any=0.1 , UpperCamelCase_: Any=0.1 , UpperCamelCase_: Optional[int]=10 , UpperCamelCase_: List[str]=0.02 , UpperCamelCase_: List[Any]=3 , UpperCamelCase_: Any=0.6 , UpperCamelCase_: Any=None , ) -> str:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = image_size
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = is_training
lowercase__ = use_labels
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = type_sequence_label_size
lowercase__ = initializer_range
lowercase__ = mask_ratio
lowercase__ = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
lowercase__ = (image_size // patch_size) ** 2
lowercase__ = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
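        # Worked example with this tester's defaults: image_size=30 and
        # patch_size=2 give num_patches = (30 // 2) ** 2 = 225; with
        # mask_ratio=0.6 the expected sequence length is
        # ceil((1 - 0.6) * (225 + 1)) = ceil(90.4) = 91 visible tokens.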
def lowerCamelCase_ ( self: List[str] ) -> str:
"""simple docstring"""
lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ = self.get_config()
return config, pixel_values, labels
def lowerCamelCase_ ( self: Dict ) -> List[str]:
"""simple docstring"""
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def lowerCamelCase_ ( self: List[Any] , UpperCamelCase_: int , UpperCamelCase_: List[Any] , UpperCamelCase_: List[Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = TFViTMAEModel(config=UpperCamelCase_ )
lowercase__ = model(UpperCamelCase_ , training=UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase_: Tuple , UpperCamelCase_: Tuple , UpperCamelCase_: Any ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = TFViTMAEForPreTraining(UpperCamelCase_ )
lowercase__ = model(UpperCamelCase_ , training=UpperCamelCase_ )
# expected sequence length = num_patches
lowercase__ = (self.image_size // self.patch_size) ** 2
lowercase__ = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
lowercase__ = 1
lowercase__ = TFViTMAEForPreTraining(UpperCamelCase_ )
lowercase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase__ = model(UpperCamelCase_ , training=UpperCamelCase_ )
lowercase__ = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def lowerCamelCase_ ( self: str ) -> int:
"""simple docstring"""
lowercase__ = self.prepare_config_and_inputs()
((lowercase__) , (lowercase__) , (lowercase__)) = config_and_inputs
lowercase__ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class _a ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
_lowercase : int = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
_lowercase : List[str] = {'''feature-extraction''': TFViTMAEModel} if is_tf_available() else {}
_lowercase : Optional[int] = False
_lowercase : List[str] = False
_lowercase : Optional[int] = False
_lowercase : Optional[int] = False
def lowerCamelCase_ ( self: List[str] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = TFViTMAEModelTester(self )
lowercase__ = ConfigTester(self , config_class=UpperCamelCase_ , has_text_modality=UpperCamelCase_ , hidden_size=37 )
def lowerCamelCase_ ( self: List[Any] ) -> Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMAE does not use inputs_embeds''' )
def lowerCamelCase_ ( self: Dict ) -> List[str]:
"""simple docstring"""
pass
def lowerCamelCase_ ( self: List[Any] ) -> List[Any]:
"""simple docstring"""
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(UpperCamelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
lowercase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase_ , tf.keras.layers.Layer ) )
def lowerCamelCase_ ( self: Optional[int] ) -> List[str]:
"""simple docstring"""
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(UpperCamelCase_ )
lowercase__ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ = [*signature.parameters.keys()]
lowercase__ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCamelCase_ )
def lowerCamelCase_ ( self: Tuple ) -> int:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase_ )
def lowerCamelCase_ ( self: int ) -> str:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*UpperCamelCase_ )
def lowerCamelCase_ ( self: Union[str, Any] ) -> Any:
"""simple docstring"""
np.random.seed(2 )
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = int((config.image_size // config.patch_size) ** 2 )
lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowercase__ = model_class(UpperCamelCase_ )
lowercase__ = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = model(UpperCamelCase_ , noise=UpperCamelCase_ )
lowercase__ = copy.deepcopy(self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) )
lowercase__ = model(**UpperCamelCase_ , noise=UpperCamelCase_ )
lowercase__ = outputs_dict[0].numpy()
lowercase__ = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 )
def lowerCamelCase_ ( self: Optional[int] ) -> Optional[Any]:
"""simple docstring"""
np.random.seed(2 )
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = int((config.image_size // config.patch_size) ** 2 )
lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(UpperCamelCase_: List[Any] ):
lowercase__ = {}
for k, v in inputs_dict.items():
if tf.is_tensor(UpperCamelCase_ ):
lowercase__ = v.numpy()
else:
lowercase__ = np.array(UpperCamelCase_ )
return inputs_np_dict
for model_class in self.all_model_classes:
lowercase__ = model_class(UpperCamelCase_ )
lowercase__ = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = prepare_numpy_arrays(UpperCamelCase_ )
lowercase__ = model(UpperCamelCase_ , noise=UpperCamelCase_ )
lowercase__ = model(**UpperCamelCase_ , noise=UpperCamelCase_ )
self.assert_outputs_same(UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase_ ( self: int , UpperCamelCase_: Optional[int] , UpperCamelCase_: List[Any] , UpperCamelCase_: Tuple ) -> str:
"""simple docstring"""
np.random.seed(2 )
lowercase__ = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
lowercase__ = tf.constant(UpperCamelCase_ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
lowercase__ = tf_noise
super().check_pt_tf_models(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase_ ( self: Dict ) -> Dict:
"""simple docstring"""
np.random.seed(2 )
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(UpperCamelCase_ )
if module_member_name.endswith('''MainLayer''' )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len('''MainLayer''' )] == model_class.__name__[: -len('''Model''' )]
for module_member in (getattr(UpperCamelCase_ , UpperCamelCase_ ),)
if isinstance(UpperCamelCase_ , UpperCamelCase_ )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(UpperCamelCase_ , '''_keras_serializable''' , UpperCamelCase_ )
}
lowercase__ = int((config.image_size // config.patch_size) ** 2 )
lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
lowercase__ = tf.convert_to_tensor(UpperCamelCase_ )
inputs_dict.update({'''noise''': noise} )
for main_layer_class in tf_main_layer_classes:
lowercase__ = main_layer_class(UpperCamelCase_ )
lowercase__ = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
lowercase__ = tf.keras.Model(UpperCamelCase_ , outputs=main_layer(UpperCamelCase_ ) )
lowercase__ = model(UpperCamelCase_ )
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase__ = os.path.join(UpperCamelCase_ , '''keras_model.h5''' )
model.save(UpperCamelCase_ )
lowercase__ = tf.keras.models.load_model(
UpperCamelCase_ , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(UpperCamelCase_ , tf.keras.Model )
lowercase__ = model(UpperCamelCase_ )
self.assert_outputs_same(UpperCamelCase_ , UpperCamelCase_ )
@slow
def lowerCamelCase_ ( self: List[Any] ) -> Optional[Any]:
"""simple docstring"""
np.random.seed(2 )
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = int((config.image_size // config.patch_size) ** 2 )
lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowercase__ = model_class(UpperCamelCase_ )
lowercase__ = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = model(UpperCamelCase_ , noise=UpperCamelCase_ )
if model_class.__name__ == "TFViTMAEModel":
lowercase__ = outputs.last_hidden_state.numpy()
lowercase__ = 0
else:
lowercase__ = outputs.logits.numpy()
lowercase__ = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCamelCase_ , saved_model=UpperCamelCase_ )
lowercase__ = model_class.from_pretrained(UpperCamelCase_ )
lowercase__ = model(UpperCamelCase_ , noise=UpperCamelCase_ )
if model_class.__name__ == "TFViTMAEModel":
lowercase__ = after_outputs['''last_hidden_state'''].numpy()
lowercase__ = 0
else:
lowercase__ = after_outputs['''logits'''].numpy()
lowercase__ = 0
lowercase__ = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(UpperCamelCase_ , 1E-5 )
def lowerCamelCase_ ( self: Tuple ) -> List[Any]:
"""simple docstring"""
np.random.seed(2 )
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = int((config.image_size // config.patch_size) ** 2 )
lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowercase__ = model_class(UpperCamelCase_ )
lowercase__ = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = model(UpperCamelCase_ , noise=UpperCamelCase_ )
lowercase__ = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(UpperCamelCase_ )
lowercase__ = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
lowercase__ = model_class.from_config(model.config )
lowercase__ = new_model(UpperCamelCase_ ) # Build model
new_model.set_weights(model.get_weights() )
lowercase__ = new_model(UpperCamelCase_ , noise=UpperCamelCase_ )
self.assert_outputs_same(UpperCamelCase_ , UpperCamelCase_ )
    @unittest.skip(
        reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.''' )
def lowerCamelCase_ ( self: Optional[int] ) -> str:
"""simple docstring"""
pass
@unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' )
def lowerCamelCase_ ( self: Any ) -> Dict:
"""simple docstring"""
pass
@slow
def lowerCamelCase_ ( self: List[Any] ) -> Optional[int]:
"""simple docstring"""
lowercase__ = TFViTMAEModel.from_pretrained('''google/vit-base-patch16-224''' )
self.assertIsNotNone(UpperCamelCase_ )
def _a ( ):
"""simple docstring"""
lowercase__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class _a ( unittest.TestCase ):
@cached_property
def lowerCamelCase_ ( self: Tuple ) -> Tuple:
"""simple docstring"""
return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None
@slow
def lowerCamelCase_ ( self: int ) -> Optional[int]:
"""simple docstring"""
np.random.seed(2 )
lowercase__ = TFViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' )
lowercase__ = self.default_image_processor
lowercase__ = prepare_img()
lowercase__ = image_processor(images=UpperCamelCase_ , return_tensors='''tf''' )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
lowercase__ = ViTMAEConfig()
lowercase__ = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
lowercase__ = np.random.uniform(size=(1, num_patches) )
# forward pass
lowercase__ = model(**UpperCamelCase_ , noise=UpperCamelCase_ )
# verify the logits
lowercase__ = tf.convert_to_tensor([1, 196, 768] )
self.assertEqual(outputs.logits.shape , UpperCamelCase_ )
lowercase__ = tf.convert_to_tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] , UpperCamelCase_ , atol=1E-4 )
'''simple docstring'''
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def format_time(t ) -> str:
    """simple docstring"""
    t = int(t )
    h , m , s = t // 3600, (t // 60) % 60, t % 60
return F"{h}:{m:02d}:{s:02d}" if h != 0 else F"{m:02d}:{s:02d}"
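# Quick sanity checks for format_time (added here as an illustration; they are
# cheap enough to run at import time):
assert format_time(3725) == "1:02:05"  # 3725 s = 1 h, 2 min, 5 s
assert format_time(125) == "02:05"  # the hour field is dropped when h == 0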
def html_progress_bar(value , total , prefix , label , width=300 ) -> str:
"""simple docstring"""
# docstyle-ignore
return F"\n <div>\n {prefix}\n <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>\n {label}\n </div>\n "
def text_to_html_table(items ) -> str:
    """simple docstring"""
    html_code = """<table border=\"1\" class=\"dataframe\">\n"""
html_code += """ <thead>\n <tr style="text-align: left;">\n"""
for i in items[0]:
html_code += F" <th>{i}</th>\n"
html_code += " </tr>\n </thead>\n <tbody>\n"
for line in items[1:]:
html_code += " <tr>\n"
for elt in line:
            elt = F"{elt:.6f}" if isinstance(elt , float ) else str(elt )
html_code += F" <td>{elt}</td>\n"
html_code += " </tr>\n"
html_code += " </tbody>\n</table><p>"
return html_code
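# Worked example for text_to_html_table (illustrative input): passing
# [["Step", "Loss"], [10, 0.512345]] yields a table whose header row is
# Step/Loss and whose float cell renders as "0.512345" (floats get six
# decimals, everything else goes through str()).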
class NotebookProgressBar:
    """simple docstring"""
    warmup = 5
    update_every = 0.2
    def __init__( self , total , prefix=None , leave=True , parent=None , width=300 , ) -> None:
        self.total = total
        self.prefix = "" if prefix is None else prefix
        self.leave = leave
        self.parent = parent
        self.width = width
        self.last_value = None
        self.comment = None
        self.output = None
    def update( self , value , force_update=False , comment=None ) -> None:
        self.value = value
        if comment is not None:
            self.comment = comment
        if self.last_value is None:
            self.start_time = self.last_time = time.time()
            self.start_value = self.last_value = value
            self.elapsed_time = self.predicted_remaining = None
            self.first_calls = self.warmup
            self.wait_for = 1
            self.update_bar(value )
        elif value <= self.last_value and not force_update:
            return
        elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total ):
            if self.first_calls > 0:
                self.first_calls -= 1
            current_time = time.time()
            self.elapsed_time = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
            if value > self.start_value:
                self.average_time_per_item = self.elapsed_time / (value - self.start_value)
            else:
                self.average_time_per_item = None
            if value >= self.total:
                value = self.total
                self.predicted_remaining = None
                if not self.leave:
                    self.close()
            elif self.average_time_per_item is not None:
                self.predicted_remaining = self.average_time_per_item * (self.total - value)
            self.update_bar(value )
            self.last_value = value
            self.last_time = current_time
            if self.average_time_per_item is None:
                self.wait_for = 1
            else:
                self.wait_for = max(int(self.update_every / self.average_time_per_item ) , 1 )
    def update_bar( self , value , comment=None ) -> List[Any]:
        spaced_value = """ """ * (len(str(self.total ) ) - len(str(value ) )) + str(value )
        if self.elapsed_time is None:
            self.label = F"[{spaced_value}/{self.total} : < :"
        elif self.predicted_remaining is None:
            self.label = F"[{spaced_value}/{self.total} {format_time(self.elapsed_time )}"
        else:
            self.label = (
                F"[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <"
                F" {format_time(self.predicted_remaining )}"
            )
            self.label += F", {1/self.average_time_per_item:.2f} it/s"
        self.label += "]" if self.comment is None or len(self.comment ) == 0 else F", {self.comment}]"
        self.display()
    def display( self ) -> str:
        self.html_code = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
        if self.parent is not None:
            # If this is a child bar, the parent will take care of the display.
            self.parent.display()
            return
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code ) , display_id=True )
        else:
            self.output.update(disp.HTML(self.html_code ) )
    def close( self ) -> Optional[Any]:
        if self.parent is None and self.output is not None:
            self.output.update(disp.HTML("""""" ) )
class NotebookTrainingTracker(NotebookProgressBar):
"""simple docstring"""
    def __init__( self , num_steps , column_names=None ) -> Optional[int]:
        super().__init__(num_steps )
        self.inner_table = None if column_names is None else [column_names]
        self.child_bar = None
    def display( self ) -> Optional[int]:
        self.html_code = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
        if self.inner_table is not None:
            self.html_code += text_to_html_table(self.inner_table )
        if self.child_bar is not None:
            self.html_code += self.child_bar.html_code
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code ) , display_id=True )
        else:
            self.output.update(disp.HTML(self.html_code ) )
    def write_line( self , values ) -> Any:
        if self.inner_table is None:
            self.inner_table = [list(values.keys() ), list(values.values() )]
        else:
            columns = self.inner_table[0]
            if len(self.inner_table ) == 1:
                # We give a chance to update the column names at the first iteration
                for key in values.keys():
                    if key not in columns:
                        columns.append(key )
                self.inner_table[0] = columns
            self.inner_table.append([values[c] for c in columns] )
    def add_child( self , total , prefix=None , width=3_00 ) -> str:
        self.child_bar = NotebookProgressBar(total , prefix=prefix , parent=self , width=width )
        return self.child_bar
    def remove_child( self ) -> str:
        self.child_bar = None
        self.display()
class NotebookProgressCallback(TrainerCallback):
"""simple docstring"""
    def __init__( self ) -> Optional[Any]:
        self.training_tracker = None
        self.prediction_bar = None
        self._force_next_update = False
    def on_train_begin( self , args , state , control , **kwargs ) -> Union[str, Any]:
        self.first_column = """Epoch""" if args.evaluation_strategy == IntervalStrategy.EPOCH else """Step"""
        self.training_loss = 0
        self.last_log = 0
        column_names = [self.first_column] + ["""Training Loss"""]
        if args.evaluation_strategy != IntervalStrategy.NO:
            column_names.append("""Validation Loss""" )
        self.training_tracker = NotebookTrainingTracker(state.max_steps , column_names )
    def on_step_end( self , args , state , control , **kwargs ) -> Union[str, Any]:
        epoch = int(state.epoch ) if int(state.epoch ) == state.epoch else F"{state.epoch:.2f}"
        self.training_tracker.update(
            state.global_step + 1 , comment=F"Epoch {epoch}/{state.num_train_epochs}" , force_update=self._force_next_update , )
        self._force_next_update = False
    def on_prediction_step( self , args , state , control , eval_dataloader=None , **kwargs ) -> Union[str, Any]:
        if not has_length(eval_dataloader ):
            return
        if self.prediction_bar is None:
            if self.training_tracker is not None:
                self.prediction_bar = self.training_tracker.add_child(len(eval_dataloader ) )
            else:
                self.prediction_bar = NotebookProgressBar(len(eval_dataloader ) )
            self.prediction_bar.update(1 )
        else:
            self.prediction_bar.update(self.prediction_bar.value + 1 )
    def on_predict( self , args , state , control , **kwargs ) -> List[Any]:
        if self.prediction_bar is not None:
            self.prediction_bar.close()
            self.prediction_bar = None
    def on_log( self , args , state , control , logs=None , **kwargs ) -> Optional[int]:
        # Only for when there is no evaluation
        if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
            values = {"""Training Loss""": logs["""loss"""]}
            # First column is necessarily Step since we're not in epoch eval strategy
            values["""Step"""] = state.global_step
            self.training_tracker.write_line(values )
    def on_evaluate( self , args , state , control , metrics=None , **kwargs ) -> Dict:
        if self.training_tracker is not None:
            values = {"""Training Loss""": """No log""", """Validation Loss""": """No log"""}
            for log in reversed(state.log_history ):
                if "loss" in log:
                    values["""Training Loss"""] = log["""loss"""]
                    break
            if self.first_column == "Epoch":
                values["""Epoch"""] = int(state.epoch )
            else:
                values["""Step"""] = state.global_step
            metric_key_prefix = """eval"""
            for k in metrics:
                if k.endswith("""_loss""" ):
                    metric_key_prefix = re.sub(R"""\_loss$""" , """""" , k )
            _ = metrics.pop("""total_flos""" , None )
            _ = metrics.pop("""epoch""" , None )
            _ = metrics.pop(F"{metric_key_prefix}_runtime" , None )
            _ = metrics.pop(F"{metric_key_prefix}_samples_per_second" , None )
            _ = metrics.pop(F"{metric_key_prefix}_steps_per_second" , None )
            _ = metrics.pop(F"{metric_key_prefix}_jit_compilation_time" , None )
            for k, v in metrics.items():
                if k == F"{metric_key_prefix}_loss":
                    values["""Validation Loss"""] = v
                else:
                    splits = k.split("""_""" )
                    name = """ """.join([part.capitalize() for part in splits[1:]] )
                    values[name] = v
            self.training_tracker.write_line(values )
            self.training_tracker.remove_child()
            self.prediction_bar = None
            # Evaluation takes a long time so we should force the next update.
            self._force_next_update = True
    def on_train_end( self , args , state , control , **kwargs ) -> Dict:
        self.training_tracker.update(
            state.global_step , comment=F"Epoch {int(state.epoch )}/{state.num_train_epochs}" , force_update=True )
        self.training_tracker = None
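# A minimal wiring sketch, assuming the standard `transformers` Trainer API; in
# upstream transformers this callback is attached automatically when training
# inside a notebook, but it can also be installed by hand:
#
#     from transformers import Trainer, TrainingArguments
#     trainer = Trainer(model=model, args=TrainingArguments("out"),
#                       callbacks=[NotebookProgressCallback()])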
| 0
|
'''simple docstring'''
from sklearn.metrics import matthews_corrcoef
import datasets
_DESCRIPTION = "\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n"
_KWARGS_DESCRIPTION = "\nArgs:\n predictions (list of int): Predicted labels, as returned by a model.\n references (list of int): Ground truth labels.\n sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n Example 1, a basic example with only predictions and references as inputs:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3])\n >>> print(round(results['matthews_correlation'], 2))\n 0.54\n\n Example 2, the same example as above, but also including sample weights:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 3, 1, 1, 1, 2])\n >>> print(round(results['matthews_correlation'], 2))\n 0.1\n\n Example 3, the same example as above, but with sample weights that cause a negative correlation:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 1, 0, 0, 0, 1])\n >>> print(round(results['matthews_correlation'], 2))\n -0.25\n"
_CITATION = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class MatthewsCorrelation(datasets.Metric):
"""simple docstring"""
    def _info( self ) -> Optional[Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"""
] , )
    def _compute( self , predictions , references , sample_weight=None ) -> List[str]:
        return {
            "matthews_correlation": float(matthews_corrcoef(references , predictions , sample_weight=sample_weight ) ),
        }
| 0
| 1
|
"""simple docstring"""
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
__lowercase : Dict = False
logger = logging.get_logger(__name__)
DEFAULT_FONT_PATH = """ybelkada/fonts"""
def _check_torch_version():
if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
raise ImportError(
F"""You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use """
'''Pix2StructImageProcessor. Please upgrade torch.''' )
def torch_extract_patches( image_tensor , patch_height , patch_width ):
    requires_backends(torch_extract_patches , ['''torch'''] )
    _check_torch_version()
    image_tensor = image_tensor.unsqueeze(0 )
    patches = torch.nn.functional.unfold(image_tensor , (patch_height, patch_width) , stride=(patch_height, patch_width) )
    patches = patches.reshape(image_tensor.size(0 ) , image_tensor.size(1 ) , patch_height , patch_width , -1 )
    patches = patches.permute(0 , 4 , 2 , 3 , 1 ).reshape(
        image_tensor.size(2 ) // patch_height , image_tensor.size(3 ) // patch_width , image_tensor.size(1 ) * patch_height * patch_width , )
    return patches.unsqueeze(0 )
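# A minimal shape sketch for `torch_extract_patches` above; the sizes are
# hypothetical:
#
#     x = torch.randn(3, 32, 64)                  # (channels, height, width)
#     patches = torch_extract_patches(x, 16, 16)
#     patches.shape  # (1, 2, 4, 768), i.e. [1, rows, columns, C * patch_h * patch_w]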
def render_text( text: str , text_size: int = 3_6 , text_color: str = "black" , background_color: str = "white" , left_padding: int = 5 , right_padding: int = 5 , top_padding: int = 5 , bottom_padding: int = 5 , font_bytes: Optional[bytes] = None , font_path: Optional[str] = None , ):
    requires_backends(render_text , '''vision''' )
    # Add new lines so that each line is no more than 80 characters.
    wrapper = textwrap.TextWrapper(width=8_0 )
    lines = wrapper.wrap(text=text )
    wrapped_text = '''\n'''.join(lines )
    if font_bytes is not None and font_path is None:
        font = io.BytesIO(font_bytes )
    elif font_path is not None:
        font = font_path
    else:
        font = hf_hub_download(DEFAULT_FONT_PATH , '''Arial.TTF''' )
    font = ImageFont.truetype(font , encoding='''UTF-8''' , size=text_size )
    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    temp_draw = ImageDraw.Draw(Image.new('''RGB''' , (1, 1) , background_color ) )
    _ , _ , text_width , text_height = temp_draw.textbbox((0, 0) , wrapped_text , font )
    # Create the actual image with a bit of padding around the text.
    image_width = text_width + left_padding + right_padding
    image_height = text_height + top_padding + bottom_padding
    image = Image.new('''RGB''' , (image_width, image_height) , background_color )
    draw = ImageDraw.Draw(image )
    draw.text(xy=(left_padding, top_padding) , text=wrapped_text , fill=text_color , font=font )
    return image
def render_header( image: np.ndarray , header: str , **kwargs ):
    requires_backends(render_header , '''vision''' )
    # Convert to PIL image if necessary
    image = to_pil_image(image )
    header_image = render_text(header , **kwargs )
    new_width = max(header_image.width , image.width )
    new_height = int(image.height * (new_width / image.width) )
    new_header_height = int(header_image.height * (new_width / header_image.width) )
    new_image = Image.new('''RGB''' , (new_width, new_height + new_header_height) , '''white''' )
    new_image.paste(header_image.resize((new_width, new_header_height) ) , (0, 0) )
    new_image.paste(image.resize((new_width, new_height) ) , (0, new_header_height) )
    # Convert back to the original framework if necessary
    new_image = to_numpy_array(new_image )
    if infer_channel_dimension_format(new_image ) == ChannelDimension.LAST:
        new_image = to_channel_dimension_format(new_image , ChannelDimension.LAST )
    return new_image
class Pix2StructImageProcessor( BaseImageProcessor ):
"""simple docstring"""
__lowercase :Dict = ["flattened_patches"]
    def __init__( self , do_convert_rgb = True , do_normalize = True , patch_size = None , max_patches = 2_048 , is_vqa = False , **kwargs , ) -> None:
        '''simple docstring'''
        super().__init__(**kwargs )
        self.patch_size = patch_size if patch_size is not None else {'''height''': 16, '''width''': 16}
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = max_patches
        self.is_vqa = is_vqa
    def extract_flattened_patches( self , image , max_patches , patch_size , **kwargs ) -> np.ndarray:
        '''simple docstring'''
        requires_backends(self.extract_flattened_patches , '''torch''' )
        _check_torch_version()
        # convert to torch
        image = to_channel_dimension_format(image , ChannelDimension.FIRST )
        image = torch.from_numpy(image )
        patch_height, patch_width = patch_size['''height'''], patch_size['''width''']
        image_height, image_width = get_image_size(image )
        # maximize scale s.t. the resized image fits within `max_patches` patches
        scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width) )
        num_feasible_rows = max(min(math.floor(scale * image_height / patch_height ) , max_patches ) , 1 )
        num_feasible_cols = max(min(math.floor(scale * image_width / patch_width ) , max_patches ) , 1 )
        resized_height = max(num_feasible_rows * patch_height , 1 )
        resized_width = max(num_feasible_cols * patch_width , 1 )
        image = torch.nn.functional.interpolate(
            image.unsqueeze(0 ) , size=(resized_height, resized_width) , mode='''bilinear''' , align_corners=False , antialias=True , ).squeeze(0 )
        # [1, rows, columns, patch_height * patch_width * image_channels]
        patches = torch_extract_patches(image , patch_height , patch_width )
        patches_shape = patches.shape
        rows = patches_shape[1]
        columns = patches_shape[2]
        depth = patches_shape[3]
        # [rows * columns, patch_height * patch_width * image_channels]
        patches = patches.reshape([rows * columns, depth] )
        # [rows * columns, 1]
        row_ids = torch.arange(rows ).reshape([rows, 1] ).repeat(1 , columns ).reshape([rows * columns, 1] )
        col_ids = torch.arange(columns ).reshape([1, columns] ).repeat(rows , 1 ).reshape([rows * columns, 1] )
        # Offset by 1 so the ids do not contain zeros, which represent padding.
        row_ids += 1
        col_ids += 1
        # Prepare additional patch features.
        # [rows * columns, 1]
        row_ids = row_ids.to(torch.float32 )
        col_ids = col_ids.to(torch.float32 )
        # [rows * columns, 2 + patch_height * patch_width * image_channels]
        result = torch.cat([row_ids, col_ids, patches] , -1 )
        # [max_patches, 2 + patch_height * patch_width * image_channels]
        result = torch.nn.functional.pad(result , [0, 0, 0, max_patches - (rows * columns)] ).float()
        result = to_numpy_array(result )
        return result
    def normalize( self , image: np.ndarray , data_format=None , **kwargs ) -> np.ndarray:
        '''simple docstring'''
        if image.dtype == np.uint8:
            image = image.astype(np.float32 )
        # take mean across the whole `image`
        mean = np.mean(image )
        std = np.std(image )
        adjusted_stddev = max(std , 1.0 / math.sqrt(np.prod(image.shape ) ) )
        return normalize(image , mean=mean , std=adjusted_stddev , **kwargs )
    def preprocess( self , images , header_text = None , do_convert_rgb = None , do_normalize = None , max_patches = None , patch_size = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ) -> ImageInput:
        '''simple docstring'''
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        patch_size = patch_size if patch_size is not None else self.patch_size
        max_patches = max_patches if max_patches is not None else self.max_patches
        is_vqa = self.is_vqa
        if kwargs.get('''data_format''' , None ) is not None:
            raise ValueError('''data_format is not an accepted input as the outputs are always flattened patches.''' )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image ) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if is_vqa:
            if header_text is None:
                raise ValueError('''A header text must be provided for VQA models.''' )
            font_bytes = kwargs.pop('''font_bytes''' , None )
            font_path = kwargs.pop('''font_path''' , None )
            if isinstance(header_text , str ):
                header_text = [header_text] * len(images )
            images = [
                render_header(image , header_text[i] , font_bytes=font_bytes , font_path=font_path )
                for i, image in enumerate(images )
            ]
        if do_normalize:
            images = [self.normalize(image=image ) for image in images]
        # convert to torch tensor and permute
        images = [
            self.extract_flattened_patches(image=image , max_patches=max_patches , patch_size=patch_size )
            for image in images
        ]
        # create attention mask in numpy
        attention_masks = [(image.sum(axis=-1 ) != 0).astype(np.float32 ) for image in images]
        encoded_outputs = BatchFeature(
            data={'''flattened_patches''': images, '''attention_mask''': attention_masks} , tensor_type=return_tensors )
        return encoded_outputs
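# A minimal usage sketch for the processor above, assuming a PIL image as input;
# the patch depth is 2 id features plus 16 * 16 * 3 pixel values:
#
#     processor = Pix2StructImageProcessor(max_patches=1024)
#     batch = processor(images=pil_image, return_tensors="np")
#     batch["flattened_patches"].shape  # (1, 1024, 2 + 16 * 16 * 3)
#     batch["attention_mask"].shape     # (1, 1024), zero over padded patch slots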
| 142
|
"""simple docstring"""
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
logger = logging.getLogger(__name__)
name_width = 5_0  # max width of layer names
qname_width = 7_0  # max width of quantizer names
def add_arguments( parser ):
    group = parser.add_argument_group('''quant_trainer arguments''' )
group.add_argument('''--wprec''' , type=_lowerCamelCase , default=8 , help='''weight precision''' )
group.add_argument('''--aprec''' , type=_lowerCamelCase , default=8 , help='''activation precision''' )
group.add_argument('''--quant-per-tensor''' , action='''store_true''' , help='''per tensor weight scaling''' )
group.add_argument('''--quant-disable''' , action='''store_true''' , help='''disable all quantizers''' )
group.add_argument('''--quant-disable-embeddings''' , action='''store_true''' , help='''disable all embeddings quantizers''' )
group.add_argument('''--quant-disable-keyword''' , type=_lowerCamelCase , nargs='''+''' , help='''disable quantizers by keyword''' )
group.add_argument('''--quant-disable-layer-module''' , type=_lowerCamelCase , help='''disable quantizers by keyword under layer.''' )
group.add_argument('''--quant-enable-layer-module''' , type=_lowerCamelCase , help='''enable quantizers by keyword under layer''' )
group.add_argument('''--calibrator''' , default='''max''' , help='''which quantization range calibrator to use''' )
group.add_argument('''--percentile''' , default=_lowerCamelCase , type=_lowerCamelCase , help='''percentile for PercentileCalibrator''' )
group.add_argument('''--fuse-qkv''' , action='''store_true''' , help='''use the same scale factor for qkv''' )
group.add_argument('''--clip-gelu''' , metavar='''N''' , type=_lowerCamelCase , help='''clip gelu output maximum value to N''' )
group.add_argument(
'''--recalibrate-weights''' , action='''store_true''' , help=(
'''recalibrate weight amaxes by taking the max of the weights.'''
''' amaxes will be computed with the current quantization granularity (axis).'''
) , )
def set_default_quantizers( args ):
    if args.calibrator == "max":
        calib_method = '''max'''
    elif args.calibrator == "percentile":
        if args.percentile is None:
            raise ValueError('''Specify --percentile when using percentile calibrator''' )
        calib_method = '''histogram'''
    elif args.calibrator == "mse":
        calib_method = '''histogram'''
    else:
        raise ValueError(F"""Invalid calibrator {args.calibrator}""" )
    input_desc = QuantDescriptor(num_bits=args.aprec , calib_method=calib_method )
    weight_desc = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) )
    quant_nn.QuantLinear.set_default_quant_desc_input(input_desc )
    quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc )
def configure_model( model , args , calib=False , eval=False ):
    logger.info('''Configuring Model for Quantization''' )
    logger.info(F"""using quantization package {pytorch_quantization.__file__}""" )
    if not calib:
        if args.quant_disable_embeddings:
            set_quantizer_by_name(model , ['''embeddings'''] , which='''weight''' , _disabled=True )
        if args.quant_disable:
            set_quantizer_by_name(model , [''''''] , _disabled=True )
        if args.quant_disable_keyword:
            set_quantizer_by_name(model , args.quant_disable_keyword , _disabled=True )
        if args.quant_disable_layer_module:
            set_quantizer_by_name(model , [r'''layer.\d+.''' + args.quant_disable_layer_module] , _disabled=True )
        if args.quant_enable_layer_module:
            set_quantizer_by_name(model , [r'''layer.\d+.''' + args.quant_enable_layer_module] , _disabled=False )
        if args.recalibrate_weights:
            recalibrate_weights(model )
        if args.fuse_qkv:
            fuse_qkv(model , args )
        if args.clip_gelu:
            clip_gelu(model , args.clip_gelu )
    # if args.local_rank in [-1, 0] and not calib:
    print_quant_summary(model )
def enable_calibration( model ):
    logger.info('''Enabling Calibration''' )
    for name, module in model.named_modules():
        if name.endswith('''_quantizer''' ):
            if module._calibrator is not None:
                module.disable_quant()
                module.enable_calib()
            else:
                module.disable()
            logger.info(F"""{name:80}: {module}""" )
def finish_calibration( model , args ):
    logger.info('''Loading calibrated amax''' )
    for name, module in model.named_modules():
        if name.endswith('''_quantizer''' ):
            if module._calibrator is not None:
                if isinstance(module._calibrator , calib.MaxCalibrator ):
                    module.load_calib_amax()
                else:
                    module.load_calib_amax('''percentile''' , percentile=args.percentile )
                module.enable_quant()
                module.disable_calib()
            else:
                module.enable()
    model.cuda()
    print_quant_summary(model )
def fuse_qkv( model , args ):
    def fuse3(qq , qk , qv ):
        for mod in [qq, qk, qv]:
            if not hasattr(mod , '''_amax''' ):
                print(''' WARNING: NO AMAX BUFFER''' )
                return
        q = qq._amax.detach().item()
        k = qk._amax.detach().item()
        v = qv._amax.detach().item()
        amax = max(q , k , v )
        qq._amax.fill_(amax )
        qk._amax.fill_(amax )
        qv._amax.fill_(amax )
        logger.info(F"""          q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}""" )
    for name, mod in model.named_modules():
        if name.endswith('''.attention.self''' ):
            logger.info(F"""FUSE_QKV: {name:{name_width}}""" )
            fuse3(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
            if args.quant_per_tensor:
                fuse3(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )
def clip_gelu( model , maxval ):
    for name, mod in model.named_modules():
        if name.endswith('''.output.dense''' ) and not name.endswith('''attention.output.dense''' ):
            amax_init = mod._input_quantizer._amax.data.detach().item()
            mod._input_quantizer._amax.data.detach().clamp_(max=maxval )
            amax = mod._input_quantizer._amax.data.detach().item()
            logger.info(F"""CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}""" )
def expand_amax( model ):
    for name, mod in model.named_modules():
        if hasattr(mod , '''_weight_quantizer''' ) and mod._weight_quantizer.axis is not None:
            k = mod.weight.shape[0]
            amax = mod._weight_quantizer._amax.detach()
            mod._weight_quantizer._amax = torch.ones(k , dtype=amax.dtype , device=amax.device ) * amax
            print(F"""expanding {name} {amax} -> {mod._weight_quantizer._amax}""" )
def recalibrate_weights( model ):
    for name, mod in model.named_modules():
        if hasattr(mod , '''_weight_quantizer''' ):
            if not hasattr(mod._weight_quantizer , '''_amax''' ):
                print(F"""RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER""" )
                continue
            # determine which axes to reduce across
            # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
            axis_set = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
            reduce_axis = set(range(len(mod.weight.size() ) ) ) - axis_set
            amax = pytorch_quantization.utils.reduce_amax(mod.weight , axis=reduce_axis , keepdims=True ).detach()
            logger.info(F"""RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}""" )
            mod._weight_quantizer._amax = amax
def print_model_summary( model , name_width=2_5 , line_width=1_8_0 , ignore=None ):
    if ignore is None:
        ignore = []
    elif not isinstance(ignore , list ):
        ignore = [ignore]
    name_width = 0
    for name, mod in model.named_modules():
        if not hasattr(mod , '''weight''' ):
            continue
        name_width = max(name_width , len(name ) )
    for name, mod in model.named_modules():
        input_q = getattr(mod , '''_input_quantizer''' , None )
        weight_q = getattr(mod , '''_weight_quantizer''' , None )
        if not hasattr(mod , '''weight''' ):
            continue
        if type(mod ) in ignore:
            continue
        if [True for s in ignore if type(s ) is str and s in name]:
            continue
        act_str = F"""Act:{input_q.extra_repr()}"""
        wgt_str = F"""Wgt:{weight_q.extra_repr()}"""
        s = F"""{name:{name_width}} {act_str} {wgt_str}"""
        if len(s ) <= line_width:
            logger.info(s )
        else:
            logger.info(F"""{name:{name_width}} {act_str}""" )
            logger.info(F"""{" ":{name_width}} {wgt_str}""" )
def print_quant_summary( model ):
    count = 0
    for name, mod in model.named_modules():
        if isinstance(mod , pytorch_quantization.nn.TensorQuantizer ):
            print(F"""{name:80} {mod}""" )
            count += 1
    print(F"""{count} TensorQuantizers found in model""" )
def set_quantizer( name , mod , quantizer , k , v ):
    quantizer_mod = getattr(mod , quantizer , None )
    if quantizer_mod is not None:
        assert hasattr(quantizer_mod , k )
        setattr(quantizer_mod , k , v )
    else:
        logger.warning(F"""{name} has no {quantizer}""" )
def set_quantizers( name , mod , which='''both''' , **kwargs ):
    s = F"""Warning: changing {which} quantizers of {name:{qname_width}}"""
    for k, v in kwargs.items():
        s += F""" {k}={v}"""
        if which in ['''input''', '''both''']:
            set_quantizer(name , mod , '''_input_quantizer''' , k , v )
        if which in ['''weight''', '''both''']:
            set_quantizer(name , mod , '''_weight_quantizer''' , k , v )
    logger.info(s )
def set_quantizer_by_name( model , names , **kwargs ):
    for name, mod in model.named_modules():
        if hasattr(mod , '''_input_quantizer''' ) or hasattr(mod , '''_weight_quantizer''' ):
            for n in names:
                if re.search(n , name ):
                    set_quantizers(name , mod , **kwargs )
        elif name.endswith('''_quantizer''' ):
            for n in names:
                if re.search(n , name ):
                    s = F"""Warning: changing {name:{name_width}}"""
                    for k, v in kwargs.items():
                        s += F""" {k}={v}"""
                        setattr(mod , k , v )
                    logger.info(s )
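# A minimal end-to-end sketch of the calibration flow implemented above; `model`
# and the argparse `args` namespace (built from `add_arguments`) are assumed to
# exist:
#
#     configure_model(model, args, calib=True)   # install quantizers, no quantization yet
#     enable_calibration(model)                  # switch quantizers to range collection
#     ...                                        # run a few forward passes
#     finish_calibration(model, args)            # load amaxes, re-enable quantization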
| 142
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    """configuration_mega""": ["""MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MegaConfig""", """MegaOnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_mega"""] = [
"""MEGA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MegaForCausalLM""",
"""MegaForMaskedLM""",
"""MegaForMultipleChoice""",
"""MegaForQuestionAnswering""",
"""MegaForSequenceClassification""",
"""MegaForTokenClassification""",
"""MegaModel""",
"""MegaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
_A: Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 617
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A: Tuple = logging.get_logger(__name__)
class TimmBackboneConfig( PretrainedConfig ):
    model_type = """timm_backbone"""
    def __init__( self , backbone=None , num_channels=3 , features_only=True , use_pretrained_backbone=True , out_indices=None , **kwargs , ):
        super().__init__(**kwargs )
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
| 617
| 1
|
from datetime import datetime
import requests
def download_video(url: str) -> bytes:
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content
if __name__ == "__main__":
    url = input('''Enter Video/IGTV url: ''').strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
    with open(file_name, '''wb''') as fp:
        fp.write(download_video(url))
    print(f"Done. Video saved to disk as {file_name}.")
| 380
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
A__: Dict = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
A__: Any = {
'''vocab_file''': {
'''google/electra-small-generator''': (
'''https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt'''
),
'''google/electra-base-generator''': '''https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt''',
'''google/electra-large-generator''': (
'''https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt'''
),
'''google/electra-small-discriminator''': (
'''https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt'''
),
'''google/electra-base-discriminator''': (
'''https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt'''
),
'''google/electra-large-discriminator''': (
'''https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''google/electra-small-generator''': (
'''https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json'''
),
'''google/electra-base-generator''': (
'''https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json'''
),
'''google/electra-large-generator''': (
'''https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json'''
),
'''google/electra-small-discriminator''': (
'''https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json'''
),
'''google/electra-base-discriminator''': (
'''https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json'''
),
'''google/electra-large-discriminator''': (
'''https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json'''
),
},
}
A__: Tuple = {
'''google/electra-small-generator''': 512,
'''google/electra-base-generator''': 512,
'''google/electra-large-generator''': 512,
'''google/electra-small-discriminator''': 512,
'''google/electra-base-discriminator''': 512,
'''google/electra-large-discriminator''': 512,
}
A__: List[Any] = {
'''google/electra-small-generator''': {'''do_lower_case''': True},
'''google/electra-base-generator''': {'''do_lower_case''': True},
'''google/electra-large-generator''': {'''do_lower_case''': True},
'''google/electra-small-discriminator''': {'''do_lower_case''': True},
'''google/electra-base-discriminator''': {'''do_lower_case''': True},
'''google/electra-large-discriminator''': {'''do_lower_case''': True},
}
class ElectraTokenizerFast( PreTrainedTokenizerFast ):
"""simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("lowercase" , do_lower_case ) != do_lower_case
            or normalizer_state.get("strip_accents" , strip_accents ) != strip_accents
            or normalizer_state.get("handle_chinese_chars" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("type" ) )
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b=None ):
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_a: List[int] , token_ids_b: Optional[List[int]] = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ):
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
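# A minimal sketch of the special-token layout produced by the two methods above
# (standard BERT/WordPiece convention; A and B stand for the two sequences):
#
#     single sequence: [CLS] A [SEP]           token_type_ids: 0 ... 0
#     sequence pair:   [CLS] A [SEP] B [SEP]   token_type_ids: 0 ... 0 1 ... 1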
| 380
| 1
|
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class lowerCamelCase__ ( snake_case , snake_case , unittest.TestCase ):
SCREAMING_SNAKE_CASE = IFInpaintingSuperResolutionPipeline
SCREAMING_SNAKE_CASE = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''}
SCREAMING_SNAKE_CASE = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'''original_image'''} )
SCREAMING_SNAKE_CASE = PipelineTesterMixin.required_optional_params - {'''latents'''}
def _UpperCamelCase ( self ):
return self._get_superresolution_dummy_components()
def _UpperCamelCase ( self ,A ,A=0 ):
if str(A ).startswith("""mps""" ):
UpperCAmelCase = torch.manual_seed(A )
else:
UpperCAmelCase = torch.Generator(device=A ).manual_seed(A )
UpperCAmelCase = floats_tensor((1, 3, 16, 16) ,rng=random.Random(A ) ).to(A )
UpperCAmelCase = floats_tensor((1, 3, 32, 32) ,rng=random.Random(A ) ).to(A )
UpperCAmelCase = floats_tensor((1, 3, 32, 32) ,rng=random.Random(A ) ).to(A )
UpperCAmelCase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""original_image""": original_image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() ,reason="""XFormers attention is only available with CUDA and `xformers` installed""" ,)
def _UpperCamelCase ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def _UpperCamelCase ( self ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" ,reason="""float16 requires CUDA""" )
def _UpperCamelCase ( self ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def _UpperCamelCase ( self ):
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def _UpperCamelCase ( self ):
self._test_save_load_local()
def _UpperCamelCase ( self ):
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 ,)
| 74
|
"""simple docstring"""
def solution(max_base: int = 10 , max_power: int = 22 ) -> int:
    """Project Euler 63: count the n-digit positive integers that are also nth powers.

    Bases of 10 or more always have more than n digits at the nth power, and 9**22
    already has only 21 digits, so bases below 10 and powers below 22 cover every case.
    """
    bases = range(1 , max_base )
    powers = range(1 , max_power )
    return sum(
        1 for power in powers for base in bases if len(str(base**power ) ) == power )
if __name__ == "__main__":
print(F"""{solution(10, 22) = }""")
| 74
| 1
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json",
"google/bigbird-roberta-large": "https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json",
"google/bigbird-base-trivia-itc": "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json",
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class BigBirdConfig( PretrainedConfig ):
    model_type = "big_bird"
    def __init__( self , vocab_size=5_0358 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu_new" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=4096 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , use_cache=True , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , sep_token_id=66 , attention_type="block_sparse" , use_bias=True , rescale_embeddings=False , block_size=64 , num_random_blocks=3 , classifier_dropout=None , **kwargs , ):
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , sep_token_id=sep_token_id , **kwargs , )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout
class BigBirdOnnxConfig( OnnxConfig ):
@property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
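# A minimal usage sketch for the configuration class above; the keyword values
# shown are simply its documented defaults:
#
#     config = BigBirdConfig(attention_type="block_sparse", block_size=64, num_random_blocks=3)
#     config.max_position_embeddings  # 4096 by default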
| 307
|
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()
X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]
X_train, X_test, y_train, y_test = train_test_split(X, y)
def euclidean_distance(point_a, point_b):
    return np.linalg.norm(np.array(point_a ) - np.array(point_b ) )
def classifier(train_data, train_target, classes, point, k=5 ):
    data = zip(train_data, train_target )
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point )
        distances.append((distance, data_point[1]) )
    # Choosing 'k' points with the least distances; tuples sort by their first
    # element, so this picks the k nearest neighbours.
    votes = [i[1] for i in sorted(distances )[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes ).most_common(1 )[0][0]
    return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
| 307
| 1
|
"""simple docstring"""
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class __A ( SCREAMING_SNAKE_CASE_ ):
@require_torch
def __A ( self ):
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
_lowerCAmelCase : int = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
"""
_lowerCAmelCase : str = """
mname = \"hf-internal-testing/tiny-random-bert\"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task=\"fill-mask\", model=mname)
print(\"success\")
"""
_lowerCAmelCase : int = """
import socket
def offline_socket(*args, **kwargs): raise RuntimeError(\"Offline mode is enabled, we shouldn't access internet\")
socket.socket = offline_socket
"""
# Force fetching the files so that we can use the cache
_lowerCAmelCase : Optional[Any] = """hf-internal-testing/tiny-random-bert"""
BertConfig.from_pretrained(a__ )
BertModel.from_pretrained(a__ )
BertTokenizer.from_pretrained(a__ )
pipeline(task="""fill-mask""" , model=a__ )
# baseline - just load from_pretrained with normal network
_lowerCAmelCase : Any = [sys.executable, """-c""", """\n""".join([load, run, mock] )]
# should succeed
_lowerCAmelCase : Union[str, Any] = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_lowerCAmelCase : Dict = """1"""
_lowerCAmelCase : Optional[int] = subprocess.run(a__ , env=a__ , check=a__ , capture_output=a__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("""success""" , result.stdout.decode() )
@require_torch
def __A ( self ):
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
_lowerCAmelCase : Dict = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
"""
_lowerCAmelCase : List[str] = """
mname = \"hf-internal-testing/tiny-random-bert\"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task=\"fill-mask\", model=mname)
print(\"success\")
"""
_lowerCAmelCase : List[Any] = """
import socket
def offline_socket(*args, **kwargs): raise socket.error(\"Faking flaky internet\")
socket.socket = offline_socket
"""
# Force fetching the files so that we can use the cache
_lowerCAmelCase : Dict = """hf-internal-testing/tiny-random-bert"""
BertConfig.from_pretrained(a__ )
BertModel.from_pretrained(a__ )
BertTokenizer.from_pretrained(a__ )
pipeline(task="""fill-mask""" , model=a__ )
# baseline - just load from_pretrained with normal network
_lowerCAmelCase : int = [sys.executable, """-c""", """\n""".join([load, run, mock] )]
# should succeed
_lowerCAmelCase : List[Any] = self.get_env()
_lowerCAmelCase : List[str] = subprocess.run(a__ , env=a__ , check=a__ , capture_output=a__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("""success""" , result.stdout.decode() )
@require_torch
def __A ( self ):
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
_lowerCAmelCase : str = """
from transformers import BertConfig, BertModel, BertTokenizer
"""
_lowerCAmelCase : int = """
mname = \"hf-internal-testing/tiny-random-bert-sharded\"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print(\"success\")
"""
_lowerCAmelCase : Union[str, Any] = """
import socket
def offline_socket(*args, **kwargs): raise ValueError(\"Offline mode is enabled\")
socket.socket = offline_socket
"""
# baseline - just load from_pretrained with normal network
_lowerCAmelCase : int = [sys.executable, """-c""", """\n""".join([load, run] )]
# should succeed
_lowerCAmelCase : Any = self.get_env()
_lowerCAmelCase : int = subprocess.run(a__ , env=a__ , check=a__ , capture_output=a__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("""success""" , result.stdout.decode() )
# next emulate no network
_lowerCAmelCase : Optional[int] = [sys.executable, """-c""", """\n""".join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_lowerCAmelCase : Any = """1"""
_lowerCAmelCase : List[Any] = subprocess.run(a__ , env=a__ , check=a__ , capture_output=a__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("""success""" , result.stdout.decode() )
@require_torch
def __A ( self ):
_lowerCAmelCase : Optional[Any] = """
from transformers import pipeline
"""
_lowerCAmelCase : List[Any] = """
mname = \"hf-internal-testing/tiny-random-bert\"
pipe = pipeline(model=mname)
"""
_lowerCAmelCase : List[Any] = """
import socket
def offline_socket(*args, **kwargs): raise socket.error(\"Offline mode is enabled\")
socket.socket = offline_socket
"""
_lowerCAmelCase : List[Any] = self.get_env()
_lowerCAmelCase : Union[str, Any] = """1"""
_lowerCAmelCase : Tuple = [sys.executable, """-c""", """\n""".join([load, mock, run] )]
_lowerCAmelCase : Tuple = subprocess.run(a__ , env=a__ , check=a__ , capture_output=a__ )
self.assertEqual(result.returncode , 1 , result.stderr )
self.assertIn(
"""You cannot infer task automatically within `pipeline` when using offline mode""" , result.stderr.decode().replace("""\n""" , """""" ) , )
@require_torch
def __A ( self ):
_lowerCAmelCase : List[str] = """
from transformers import AutoModel
"""
_lowerCAmelCase : Optional[Any] = """
mname = \"hf-internal-testing/test_dynamic_model\"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print(\"success\")
"""
# baseline - just load from_pretrained with normal network
_lowerCAmelCase : Tuple = [sys.executable, """-c""", """\n""".join([load, run] )]
# should succeed
_lowerCAmelCase : Dict = self.get_env()
_lowerCAmelCase : Tuple = subprocess.run(a__ , env=a__ , check=a__ , capture_output=a__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("""success""" , result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_lowerCAmelCase : Dict = """1"""
_lowerCAmelCase : List[Any] = subprocess.run(a__ , env=a__ , check=a__ , capture_output=a__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("""success""" , result.stdout.decode() )
| 707
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_altclip': [
'ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AltCLIPConfig',
'AltCLIPTextConfig',
'AltCLIPVisionConfig',
],
'processing_altclip': ['AltCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_altclip'] = [
'ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'AltCLIPPreTrainedModel',
'AltCLIPModel',
'AltCLIPTextModel',
'AltCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
_a : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 663
| 0
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@property
    def dummy_uncond_unet( self ):
'''simple docstring'''
torch.manual_seed(0 )
        model = UNet2DModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
return model
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :int = self.dummy_uncond_unet
lowerCAmelCase__ :int = PNDMScheduler()
lowerCAmelCase__ :Any = PNDMPipeline(unet=__UpperCAmelCase , scheduler=__UpperCAmelCase )
pndm.to(__UpperCAmelCase )
pndm.set_progress_bar_config(disable=__UpperCAmelCase )
lowerCAmelCase__ :Optional[int] = torch.manual_seed(0 )
lowerCAmelCase__ :List[str] = pndm(generator=__UpperCAmelCase , num_inference_steps=2_0 , output_type='numpy' ).images
lowerCAmelCase__ :str = torch.manual_seed(0 )
lowerCAmelCase__ :Union[str, Any] = pndm(generator=__UpperCAmelCase , num_inference_steps=2_0 , output_type='numpy' , return_dict=__UpperCAmelCase )[0]
lowerCAmelCase__ :List[Any] = image[0, -3:, -3:, -1]
lowerCAmelCase__ :List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
lowerCAmelCase__ :Optional[int] = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def snake_case ( self ):
'''simple docstring'''
        model_id = 'google/ddpm-cifar10-32'
        unet = UNet2DModel.from_pretrained(model_id )
lowerCAmelCase__ :Any = PNDMScheduler()
lowerCAmelCase__ :Dict = PNDMPipeline(unet=__UpperCAmelCase , scheduler=__UpperCAmelCase )
pndm.to(__UpperCAmelCase )
pndm.set_progress_bar_config(disable=__UpperCAmelCase )
lowerCAmelCase__ :Union[str, Any] = torch.manual_seed(0 )
lowerCAmelCase__ :str = pndm(generator=__UpperCAmelCase , output_type='numpy' ).images
lowerCAmelCase__ :List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
lowerCAmelCase__ :int = np.array([0.15_64, 0.1_46_45, 0.14_06, 0.1_47_15, 0.1_24_25, 0.1_40_45, 0.1_31_15, 0.1_21_75, 0.1_25] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
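
# Sketch of everyday (non-test) usage; the checkpoint id follows the pipeline's documented
# example and requires the google/ddpm-cifar10-32 weights to be downloadable:
#
#   pipeline = PNDMPipeline.from_pretrained("google/ddpm-cifar10-32")
#   image = pipeline(num_inference_steps=50, output_type="pil").images[0]
#   image.save("pndm_sample.png")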
| 93
|
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}

test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}


def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Depth-first search from `vert`, returning vertices in post-order."""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order


def find_components(reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Collect every vertex reachable from `vert` in the reversed graph."""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component


def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    """Kosaraju's algorithm: DFS post-order on the graph, then DFS on the reversed graph."""
    visited = len(graph) * [False]
    reversed_graph: dict[int, list[int]] = {vert: [] for vert in range(len(graph))}

    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)

    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)

    components_list = []
    visited = len(graph) * [False]

    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)

    return components_list
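

# Quick demo on the sample graphs above; with these inputs the traversal order is
# deterministic, so the printed groupings are exactly what the code produces.
if __name__ == "__main__":
    print(strongly_connected_components(test_graph_1))  # [[0, 1, 2], [3], [4]]
    print(strongly_connected_components(test_graph_2))  # [[0, 2, 1], [3, 5, 4]]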
| 306
| 0
|
'''simple docstring'''
def solution(pence: int = 200) -> int:
    """Count the ways `pence` can be made from standard UK coins (Project Euler 31)."""
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence

    for coin in coins:
        for i in range(coin, pence + 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
if __name__ == "__main__":
assert solution(2_0_0) == 7_3_6_8_2
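    # Sanity check on a tiny case: 2 pence can be made two ways, as 1+1 or as a single 2p coin.
    assert solution(2) == 2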
| 700
|
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
    DataCollatorForSeq2Seq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 234
| 0
|
def solution(numerator: int = 3, denominator: int = 7, limit: int = 1_000_000) -> int:
    """Return the numerator of the fraction immediately to the left of numerator/denominator
    among all fractions with denominators up to `limit` (Project Euler 71)."""
    max_numerator = 0
    max_denominator = 1

    for current_denominator in range(1, limit + 1):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=1_00_00_00))
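    # Worked example from the problem statement: with denominators up to 8, the fraction
    # immediately to the left of 3/7 is 2/5, so the numerator returned is 2.
    assert solution(numerator=3, denominator=7, limit=8) == 2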
| 105
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class CLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
        self.special_tokens_map = {'unk_token': '<unk>'}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))

        image_processor_map = {
            'do_resize': True,
            'size': 20,
            'do_center_crop': True,
            'crop_size': 18,
            'do_normalize': True,
            'image_mean': [0.48145466, 0.4578275, 0.40821073],
            'image_std': [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, 'w', encoding='utf-8') as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Create a list of PIL images to feed the processor with."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors='np')
        input_processor = processor(images=image_input, return_tensors='np')

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = 'lower newer'

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ['input_ids', 'attention_mask', 'pixel_values'])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
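
# A typical CLIPProcessor call outside these fixtures looks like the sketch below; the
# openai/clip-vit-base-patch32 checkpoint is an assumption, not part of this test file:
#
#   processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#   inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt", padding=True)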
| 302
| 0
|
"""simple docstring"""
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float | int,
) -> float | int:
    """Relax the edges leaving `v` for one direction of the bidirectional search."""
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance
def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    """Run Dijkstra from both endpoints at once; stop when the two frontiers meet."""
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward = PriorityQueue()
    queue_backward = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )

        shortest_distance = pass_and_relaxation(
            graph_backward,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )

        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
"B": [["C", 1]],
"C": [["D", 1]],
"D": [["F", 1]],
"E": [["B", 1], ["G", 2]],
"F": [],
"G": [["F", 1]],
}
graph_bwd = {
"B": [["E", 1]],
"C": [["B", 1]],
"D": [["C", 1]],
"F": [["D", 1], ["G", 1]],
"E": [[None, np.inf]],
"G": [["E", 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
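    # Example: on the graphs above the shortest E -> F route is E -> G -> F with total cost 3.
    print(bidirectional_dij("E", "F", graph_fwd, graph_bwd))  # 3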
| 719
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_megatron_bert": ["MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegatronBertConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_megatron_bert"] = [
"MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MegatronBertForCausalLM",
"MegatronBertForMaskedLM",
"MegatronBertForMultipleChoice",
"MegatronBertForNextSentencePrediction",
"MegatronBertForPreTraining",
"MegatronBertForQuestionAnswering",
"MegatronBertForSequenceClassification",
"MegatronBertForTokenClassification",
"MegatronBertModel",
"MegatronBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 91
| 0
|
import heapq
import sys
import numpy as np
TPos = tuple[int, int]
class PriorityQueue:
    """Min-priority queue with membership tracking, used by the multi-heuristic A* below."""

    def __init__(self):
        self.elements = []
        self.set = set()

    def minkey(self):
        if not self.empty():
            return self.elements[0][0]
        else:
            return float("inf")

    def empty(self):
        return len(self.elements) == 0

    def put(self, item, priority):
        if item not in self.set:
            heapq.heappush(self.elements, (priority, item))
            self.set.add(item)
        else:
            # update the priority of an item already in the queue
            temp = []
            (pri, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pri, x))
                (pri, x) = heapq.heappop(self.elements)
            temp.append((priority, item))
            for pro, xxx in temp:
                heapq.heappush(self.elements, (pro, xxx))

    def remove_element(self, item):
        if item in self.set:
            self.set.remove(item)
            temp = []
            (pro, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pro, x))
                (pro, x) = heapq.heappop(self.elements)
            for prito, yyy in temp:
                heapq.heappush(self.elements, (prito, yyy))

    def top_show(self):
        return self.elements[0][1]

    def get(self):
        (priority, item) = heapq.heappop(self.elements)
        self.set.remove(item)
        return (priority, item)
def consistent_heuristic(P: TPos, goal: TPos):
    # euclidean distance
    a = np.array(P)
    b = np.array(goal)
    return np.linalg.norm(a - b)


def heuristic_1(P: TPos, goal: TPos):
    # integer division by time variable
    return consistent_heuristic(P, goal) // t


def heuristic_2(P: TPos, goal: TPos):
    # manhattan distance
    return abs(P[0] - goal[0]) + abs(P[1] - goal[1])


def key(start: TPos, i: int, goal: TPos, g_function: dict[TPos, float]):
    ans = g_function[start] + W1 * heuristics[i](start, goal)
    return ans
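

# For orientation: on the 20x20 grid configured below, consistent_heuristic((0, 0), (19, 19))
# is sqrt(19**2 + 19**2) ~= 26.87, while the Manhattan variant heuristic_2 gives 38.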
def do_something(back_pointer: dict[TPos, TPos], goal: TPos, start: TPos) -> None:
    """Print the grid with the recovered path and exit."""
    grid = np.chararray((n, n))
    for i in range(n):
        for j in range(n):
            grid[i][j] = "*"

    for i in range(n):
        for j in range(n):
            if (j, (n - 1) - i) in blocks:
                grid[i][j] = "#"

    grid[0][(n - 1)] = "-"
    x = back_pointer[goal]
    while x != start:
        (x_c, y_c) = x
        # print(x)
        grid[(n - 1) - y_c][x_c] = "-"
        x = back_pointer[x]
    grid[(n - 1)][0] = "-"

    for i in range(n):
        for j in range(n):
            if (i, j) == (0, n - 1):
                print(grid[i][j], end=" ")
                print("<-- End position", end=" ")
            else:
                print(grid[i][j], end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
    print("PATH TAKEN BY THE ALGORITHM IS:-")
    x = back_pointer[goal]
    while x != start:
        print(x, end=" ")
        x = back_pointer[x]
    print(x)
    sys.exit()


def valid(p: TPos) -> bool:
    if p[0] < 0 or p[0] > n - 1:
        return False
    if p[1] < 0 or p[1] > n - 1:
        return False
    return True
def expand_state(
    s: TPos,
    j: int,
    visited: set[TPos],
    g_function: dict[TPos, float],
    close_list_anchor: list[TPos],
    close_list_inad: list[TPos],
    open_list: list[PriorityQueue],
    back_pointer: dict[TPos, TPos],
) -> None:
    """Pop `s` from every queue and relax its four grid neighbours."""
    for itera in range(n_heuristic):
        open_list[itera].remove_element(s)
    (x, y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)

    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(neighbours) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(neighbours)
                back_pointer[neighbours] = -1
                g_function[neighbours] = float("inf")

            if valid(neighbours) and g_function[neighbours] > g_function[s] + 1:
                g_function[neighbours] = g_function[s] + 1
                back_pointer[neighbours] = s
                if neighbours not in close_list_anchor:
                    open_list[0].put(neighbours, key(neighbours, 0, goal, g_function))
                    if neighbours not in close_list_inad:
                        for var in range(1, n_heuristic):
                            if key(neighbours, var, goal, g_function) <= W2 * key(
                                neighbours, 0, goal, g_function
                            ):
                                open_list[j].put(
                                    neighbours, key(neighbours, var, goal, g_function)
                                )
def make_common_ground() -> list[TPos]:
    """Assemble the obstacle layout shared by all heuristics."""
    some_list = []
    for x in range(1, 5):
        for y in range(1, 6):
            some_list.append((x, y))

    for x in range(15, 20):
        some_list.append((x, 17))

    for x in range(10, 19):
        for y in range(1, 15):
            some_list.append((x, y))

    # L block
    for x in range(1, 4):
        for y in range(12, 19):
            some_list.append((x, y))
    for x in range(3, 13):
        for y in range(16, 19):
            some_list.append((x, y))
    return some_list
heuristics = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2}

blocks_blk = [
    (0, 1),
    (1, 1),
    (2, 1),
    (3, 1),
    (4, 1),
    (5, 1),
    (6, 1),
    (7, 1),
    (8, 1),
    (9, 1),
    (10, 1),
    (11, 1),
    (12, 1),
    (13, 1),
    (14, 1),
    (15, 1),
    (16, 1),
    (17, 1),
    (18, 1),
    (19, 1),
]
blocks_all = make_common_ground()

blocks = blocks_blk
# hyper parameters
W1 = 1
W2 = 1
n = 20
n_heuristic = 3  # one consistent and two other inconsistent

# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)

t = 1
def multi_a_star(start: TPos, goal: TPos, n_heuristic: int) -> None:
    g_function = {start: 0, goal: float("inf")}
    back_pointer = {start: -1, goal: -1}
    open_list = []
    visited = set()

    for i in range(n_heuristic):
        open_list.append(PriorityQueue())
        open_list[i].put(start, key(start, i, goal, g_function))

    close_list_anchor: list[TPos] = []
    close_list_inad: list[TPos] = []
    while open_list[0].minkey() < float("inf"):
        for i in range(1, n_heuristic):
            # print(open_list[0].minkey(), open_list[i].minkey())
            if open_list[i].minkey() <= W2 * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[i].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s,
                        i,
                        visited,
                        g_function,
                        close_list_anchor,
                        close_list_inad,
                        open_list,
                        back_pointer,
                    )
                    close_list_inad.append(get_s)
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                    else:
                        get_s = open_list[0].top_show()
                        visited.add(get_s)
                        expand_state(
                            get_s,
                            0,
                            visited,
                            g_function,
                            close_list_anchor,
                            close_list_inad,
                            open_list,
                            back_pointer,
                        )
                        close_list_anchor.append(get_s)
    print("No path found to goal")
    print()
    for i in range(n - 1, -1, -1):
        for j in range(n):
            if (j, i) in blocks:
                print("#", end=" ")
            elif (j, i) in back_pointer:
                if (j, i) == (n - 1, n - 1):
                    print("*", end=" ")
                else:
                    print("-", end=" ")
            else:
                print("*", end=" ")
            if (j, i) == (n - 1, n - 1):
                print("<-- End position", end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
if __name__ == "__main__":
    multi_a_star(start, goal, n_heuristic)
| 108
|
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class NezhaModelTester:
"""simple docstring"""
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=128,
        max_relative_position=32,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return NezhaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = NezhaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NezhaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NezhaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = NezhaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class NezhaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
    all_model_classes = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': NezhaModel,
'''fill-mask''': NezhaForMaskedLM,
'''question-answering''': NezhaForQuestionAnswering,
'''text-classification''': NezhaForSequenceClassification,
'''token-classification''': NezhaForTokenClassification,
'''zero-shot''': NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = NezhaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NezhaConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NezhaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@slow
@require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "bert.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "bert.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class NezhaModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
@slow
    def test_inference_nezha_model(self):
        model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor([[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1E-4))
@slow
    def test_inference_nezha_masked_lm(self):
        model = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 21128))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1E-4))
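
# Minimal inference sketch mirroring the integration test above (requires downloading the
# sijunhe/nezha-cn-base weights; shown for orientation only):
#
#   model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base")
#   outputs = model(torch.tensor([[0, 1, 2, 3, 4, 5]]))
#   print(outputs.last_hidden_state.shape)  # torch.Size([1, 6, 768])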
| 330
| 0
|
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
    Speech2Text2Config,
    Speech2Text2ForCausalLM,
    Speech2Text2Tokenizer,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f' {value.shape} for {full_name}'
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                    continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f'Unused weights: {unused_weights}')

    return proj_weight
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def create_vocab_dict(dict_path):
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
        words = [line.split(" ")[0] for line in lines]

    num_words = len(words)

    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }

    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
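

# Illustration with hypothetical dict.txt contents (the real file ships with the checkpoint):
# if the first two fairseq entries are "the 1234" and "of 987", create_vocab_dict returns
# {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "the": 4, "of": 5} -- special tokens first,
# then corpus words in file order.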
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    encoder_config_path,
    decoder_config_path,
    vocab_size,
    num_decoder_layers,
):
    """
    Copy/paste/tweak the fairseq checkpoint weights into the transformers design.
    """
    encoder_config = Wav2Vec2Config.from_pretrained(encoder_config_path)
    decoder_config = Speech2Text2Config.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True
    )

    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=True,
    )

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)
    projection_layer = recursively_load_weights_wav2vec2(model.encoder, hf_encoder)

    hf_decoder = Speech2Text2ForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)

    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())

    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f'The following keys are missing when loading the decoder weights: {missing_keys}')
    logger.warning(f'The following keys are unexpected when loading the decoder weights: {unexpected_keys}')

    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False

    # add projection layer
    hf_wav2vec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wav2vec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)

    vocab_dict = create_vocab_dict(dict_path)

    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)

    tokenizer = Speech2Text2Tokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"

    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__A : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument(
'--encoder_config_path',
default='facebook/wav2vec2-large-lv60',
type=str,
help='Path to hf encoder wav2vec2 checkpoint config',
)
parser.add_argument(
'--decoder_config_path',
default='facebook/s2t-small-mustc-en-fr-st',
type=str,
help='Path to hf decoder s2t checkpoint config',
)
parser.add_argument('--vocab_size', default=1_02_24, type=int, help='Vocab size of decoder')
parser.add_argument('--num_decoder_layers', default=7, type=int, help='Number of decoder layers')
__A : List[Any] = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
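
    # Example invocation (the script filename and all paths are placeholders for local files):
    #
    #   python convert_wav2vec2_seq2seq_checkpoint.py \
    #       --checkpoint_path /path/to/checkpoint.pt \
    #       --dict_path /path/to/dict.txt \
    #       --pytorch_dump_folder_path ./converted-model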
| 711
|
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
TRANSFORMERS_PATH = 'src/transformers'
PATH_TO_DOCS = 'docs/source/en'
REPO_PATH = '.'
def _find_text_in_file(filename, start_prompt, end_prompt):
    """Find the text in `filename` between a line starting with `start_prompt` and before `end_prompt`."""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
MODEL_SUFFIXES = 'Model|Encoder|Decoder|ForConditionalGeneration'
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r'TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
_re_flax_models = re.compile(r'Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Will match any TF or Flax model too, so it needs to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r'(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
def camel_case_split(identifier):
    """Split a camel-cased `identifier` into words."""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]
def _center_text(text, width):
    """Center `text` in a cell of size `width`, accounting for the double width of ✅ and ❌."""
    text_length = 2 if text == "✅" or text == "❌" else len(text)
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent
def get_model_table_from_auto_modules():
    """Generates an up-to-date model table from the content of the auto modules."""
    # Dictionary model names to config.
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_maping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_maping_names
    }
    model_name_to_prefix = {name: config.replace("Config", "") for name, config in model_name_to_config.items()}

    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool)
    fast_tokenizers = collections.defaultdict(bool)
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once).
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if attr_name.endswith("Tokenizer"):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith("TokenizerFast"):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    # Let's build that table!
    model_names = list(model_name_to_config.keys())
    model_names.sort(key=str.lower)
    columns = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c) + 2 for c in columns]
    widths[0] = max([len(name) for name in model_names]) + 2

    # Build the table per se
    table = "|" + "|".join([_center_text(c, w) for c, w in zip(columns, widths)]) + "|\n"
    # Use ":-----:" format to center-aligned table cell texts
    table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths]) + "|\n"

    check = {True: "✅", False: "❌"}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l, w) for l, w in zip(line, widths)]) + "|\n"

    return table
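

# For reference, the generated markdown renders like this excerpt (illustrative rows only):
#
#   |    Model    | Tokenizer slow | Tokenizer fast | PyTorch support | TensorFlow support | Flax Support |
#   |:-----------:|:--------------:|:--------------:|:---------------:|:------------------:|:------------:|
#   |    ALBERT   |       ✅       |       ✅       |        ✅       |         ✅         |      ✅      |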
def check_model_table(overwrite=False):
    """Check the model table in the index.md is consistent with the state of the lib and maybe `overwrite`."""
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS, "index.md"),
        start_prompt="<!--This table is updated automatically from the auto modules",
        end_prompt="<!-- End table-->",
    )
    new_table = get_model_table_from_auto_modules()

    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS, "index.md"), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:])
        else:
            raise ValueError(
                "The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this."
            )
if __name__ == "__main__":
__A : int = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
__A : Any = parser.parse_args()
check_model_table(args.fix_and_overwrite)
| 75
| 0
|
def sum_of_series(first_term: float, common_diff: float, num_of_terms: int) -> float:
    """Sum of an arithmetic progression: S = n/2 * (2a + (n - 1) * d)."""
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    return total


def main() -> None:
    print(sum_of_series(1, 1, 10))
if __name__ == "__main__":
import doctest
doctest.testmod()
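    # Worked example: a=1, d=1, n=10 gives (10 / 2) * (2 * 1 + 9 * 1) = 55.0.
    assert sum_of_series(1, 1, 10) == 55.0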
| 79
|
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
logger = logging.get_logger(__name__)

MARIAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''Helsinki-NLP/opus-mt-en-de''': '''https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json''',
# See all Marian models at https://huggingface.co/models?filter=marian
}
class __magic_name__ ( __SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = 'marian'
UpperCamelCase__ = ['past_key_values']
UpperCamelCase__ = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self , snake_case_=5_81_01 , snake_case_=None , snake_case_=10_24 , snake_case_=12 , snake_case_=40_96 , snake_case_=16 , snake_case_=12 , snake_case_=40_96 , snake_case_=16 , snake_case_=0.0 , snake_case_=0.0 , snake_case_=True , snake_case_=True , snake_case_="gelu" , snake_case_=10_24 , snake_case_=0.1 , snake_case_=0.0 , snake_case_=0.0 , snake_case_=0.02 , snake_case_=5_81_00 , snake_case_=False , snake_case_=5_81_00 , snake_case_=0 , snake_case_=0 , snake_case_=True , **snake_case_ , ):
lowercase =vocab_size
lowercase =decoder_vocab_size or vocab_size
lowercase =max_position_embeddings
lowercase =d_model
lowercase =encoder_ffn_dim
lowercase =encoder_layers
lowercase =encoder_attention_heads
lowercase =decoder_ffn_dim
lowercase =decoder_layers
lowercase =decoder_attention_heads
lowercase =dropout
lowercase =attention_dropout
lowercase =activation_dropout
lowercase =activation_function
lowercase =init_std
lowercase =encoder_layerdrop
lowercase =decoder_layerdrop
lowercase =use_cache
lowercase =encoder_layers
lowercase =scale_embedding # scale factor will be sqrt(d_model) if True
lowercase =share_encoder_decoder_embeddings
super().__init__(
pad_token_id=snake_case_ , eos_token_id=snake_case_ , is_encoder_decoder=snake_case_ , decoder_start_token_id=snake_case_ , forced_eos_token_id=snake_case_ , **snake_case_ , )
class __magic_name__ ( __SCREAMING_SNAKE_CASE ):
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
def _A( self ):
if self.task in ["default", "seq2seq-lm"]:
lowercase =OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
lowercase ={0: '''batch'''}
lowercase ={0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
lowercase ={0: '''batch''', 1: '''decoder_sequence'''}
lowercase ={0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(snake_case_ , direction='''inputs''' )
elif self.task == "causal-lm":
# TODO: figure this case out.
lowercase =OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
lowercase , lowercase =self.num_layers
for i in range(snake_case_ ):
lowercase ={0: '''batch''', 2: '''past_sequence + sequence'''}
lowercase ={0: '''batch''', 2: '''past_sequence + sequence'''}
else:
lowercase =OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
] )
return common_inputs
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
def _A( self ):
if self.task in ["default", "seq2seq-lm"]:
lowercase =super().outputs
else:
lowercase =super(snake_case_ , self ).outputs
if self.use_past:
lowercase , lowercase =self.num_layers
for i in range(snake_case_ ):
lowercase ={0: '''batch''', 2: '''past_sequence + sequence'''}
lowercase ={0: '''batch''', 2: '''past_sequence + sequence'''}
return common_outputs
def _A( self , snake_case_ , snake_case_ = -1 , snake_case_ = -1 , snake_case_ = False , snake_case_ = None , ):
lowercase =self._generate_dummy_inputs_for_encoder_and_decoder(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# Generate decoder inputs
lowercase =seq_length if not self.use_past else 1
lowercase =self._generate_dummy_inputs_for_encoder_and_decoder(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
lowercase ={f'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
lowercase =dict(**snake_case_ , **snake_case_ )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
lowercase , lowercase =common_inputs['''input_ids'''].shape
lowercase =common_inputs['''decoder_input_ids'''].shape[1]
lowercase , lowercase =self.num_attention_heads
lowercase =(
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
lowercase =decoder_seq_length + 3
lowercase =(
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
lowercase =torch.cat(
[common_inputs['''decoder_attention_mask'''], torch.ones(snake_case_ , snake_case_ )] , dim=1 )
common_inputs['''past_key_values'''] = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
lowercase , lowercase =self.num_layers
lowercase =min(snake_case_ , snake_case_ )
lowercase =max(snake_case_ , snake_case_ ) - min_num_layers
lowercase ='''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
for _ in range(snake_case_ ):
common_inputs["past_key_values"].append(
(
torch.zeros(snake_case_ ),
torch.zeros(snake_case_ ),
torch.zeros(snake_case_ ),
torch.zeros(snake_case_ ),
) )
# TODO: test this.
lowercase =encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
for _ in range(snake_case_ , snake_case_ ):
common_inputs["past_key_values"].append((torch.zeros(snake_case_ ), torch.zeros(snake_case_ )) )
return common_inputs
def _A( self , snake_case_ , snake_case_ = -1 , snake_case_ = -1 , snake_case_ = False , snake_case_ = None , ):
lowercase =self._generate_dummy_inputs_for_encoder_and_decoder(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
lowercase , lowercase =common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
lowercase =seqlen + 2
lowercase , lowercase =self.num_layers
lowercase , lowercase =self.num_attention_heads
lowercase =(
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
lowercase =common_inputs['''attention_mask'''].dtype
lowercase =torch.cat(
[common_inputs['''attention_mask'''], torch.ones(snake_case_ , snake_case_ , dtype=snake_case_ )] , dim=1 )
common_inputs['''past_key_values'''] = [
(torch.zeros(snake_case_ ), torch.zeros(snake_case_ )) for _ in range(snake_case_ )
]
return common_inputs
def _A( self , snake_case_ , snake_case_ = -1 , snake_case_ = -1 , snake_case_ = False , snake_case_ = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowercase =compute_effective_axis_dimension(
snake_case_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
lowercase =tokenizer.num_special_tokens_to_add(snake_case_ )
lowercase =compute_effective_axis_dimension(
snake_case_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=snake_case_ )
# Generate dummy inputs according to compute batch and sequence
lowercase =[''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
lowercase =dict(tokenizer(snake_case_ , return_tensors=snake_case_ ) )
return common_inputs
def _A( self , snake_case_ , snake_case_ = -1 , snake_case_ = -1 , snake_case_ = False , snake_case_ = None , ):
if self.task in ["default", "seq2seq-lm"]:
lowercase =self._generate_dummy_inputs_for_default_and_seq2seq_lm(
snake_case_ , batch_size=snake_case_ , seq_length=snake_case_ , is_pair=snake_case_ , framework=snake_case_ )
else:
lowercase =self._generate_dummy_inputs_for_causal_lm(
snake_case_ , batch_size=snake_case_ , seq_length=snake_case_ , is_pair=snake_case_ , framework=snake_case_ )
return common_inputs
def _A( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
if self.task in ["default", "seq2seq-lm"]:
lowercase =super()._flatten_past_key_values_(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
else:
lowercase =super(snake_case_ , self )._flatten_past_key_values_(
snake_case_ , snake_case_ , snake_case_ , snake_case_ )
@property
def _A( self ):
return 1E-4
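# Hedged usage sketch (not part of this file): exercising an ONNX config like the
# one above through the public `transformers` API. The class names below are the
# upstream equivalents of the obfuscated classes here and are assumptions;
# requires PyTorch.
from transformers import AutoTokenizer, MarianConfig, MarianOnnxConfig, TensorType

config = MarianConfig.from_pretrained("Helsinki-NLP/opus-mt-en-de")
onnx_config = MarianOnnxConfig(config, task="seq2seq-lm")
tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
# Mirrors generate_dummy_inputs defined above: encoder + decoder dummy tensors.
dummy_inputs = onnx_config.generate_dummy_inputs(
    tokenizer, batch_size=2, seq_length=8, framework=TensorType.PYTORCH
)
print(sorted(dummy_inputs.keys()))  # e.g. ['attention_mask', 'decoder_attention_mask', 'decoder_input_ids', 'input_ids']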
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_blip': [
'BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlipConfig',
'BlipTextConfig',
'BlipVisionConfig',
],
'processing_blip': ['BlipProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['image_processing_blip'] = ['BlipImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_blip'] = [
'BLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlipModel',
'BlipPreTrainedModel',
'BlipForConditionalGeneration',
'BlipForQuestionAnswering',
'BlipVisionModel',
'BlipTextModel',
'BlipForImageTextRetrieval',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_blip'] = [
'TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFBlipModel',
'TFBlipPreTrainedModel',
'TFBlipForConditionalGeneration',
'TFBlipForQuestionAnswering',
'TFBlipVisionModel',
'TFBlipTextModel',
'TFBlipForImageTextRetrieval',
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
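# Illustration of what the _LazyModule wiring above buys (a sketch, assuming the
# upstream `transformers` package layout): importing the package is cheap, and the
# heavy submodule import only happens on first attribute access.
from transformers.models import blip       # fast: nothing heavy imported yet
processor_cls = blip.BlipProcessor         # first access triggers the real import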
'''simple docstring'''
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
    print('Googling.....')
    url = 'https://www.google.com/search?q=' + ' '.join(sys.argv[1:])
    # "User-Agent" is the real HTTP header name, so the random UA is actually sent.
    res = requests.get(url, headers={'User-Agent': UserAgent().random})
    # res.raise_for_status()
    with open('project1a.html', 'wb') as out_file:  # only for knowing the class
        for data in res.iter_content(10_000):
            out_file.write(data)
    soup = BeautifulSoup(res.text, 'html.parser')
    links = list(soup.select('.eZt8xd'))[:5]
    print(len(links))
    for link in links:
        if link.text == "Maps":
            webbrowser.open(link.get('href'))
        else:
            webbrowser.open(f"""https://google.com{link.get('href')}""")
'''simple docstring'''
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UniDiffuserTextDecoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    _keys_to_ignore_on_load_unexpected = [r'''h\.\d+\.attn\.bias''', r'''h\.\d+\.attn\.masked_bias''']
@register_to_config
def __init__(self : Dict , A__ : int , A__ : int , A__ : Optional[int] = None , A__ : int = 5_0_2_5_7 , A__ : int = 1_0_2_4 , A__ : int = 7_6_8 , A__ : int = 1_2 , A__ : int = 1_2 , A__ : Optional[int] = None , A__ : str = "gelu_new" , A__ : float = 0.1 , A__ : float = 0.1 , A__ : float = 0.1 , A__ : float = 1e-5 , A__ : float = 0.0_2 , A__ : bool = True , A__ : bool = True , A__ : bool = False , A__ : bool = False , ) -> Union[str, Any]:
super().__init__()
lowercase = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
f'`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and'
f' `n_embd`: {n_embd} are not equal.' )
lowercase = prefix_inner_dim
lowercase = prefix_hidden_dim
lowercase = (
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
lowercase = (
nn.Linear(self.prefix_hidden_dim , A__ ) if self.prefix_hidden_dim is not None else nn.Identity()
)
lowercase = GPTaConfig(
vocab_size=A__ , n_positions=A__ , n_embd=A__ , n_layer=A__ , n_head=A__ , n_inner=A__ , activation_function=A__ , resid_pdrop=A__ , embd_pdrop=A__ , attn_pdrop=A__ , layer_norm_epsilon=A__ , initializer_range=A__ , scale_attn_weights=A__ , use_cache=A__ , scale_attn_by_inverse_layer_idx=A__ , reorder_and_upcast_attn=A__ , )
lowercase = GPTaLMHeadModel(A__ )
def UpperCAmelCase__ (self : List[Any] , A__ : torch.Tensor , A__ : torch.Tensor , A__ : Optional[torch.Tensor] = None , A__ : Optional[torch.Tensor] = None , ) -> List[str]:
lowercase = self.transformer.transformer.wte(A__ )
lowercase = self.encode_prefix(A__ )
lowercase = self.decode_prefix(A__ )
lowercase = torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
lowercase = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
lowercase = torch.cat((dummy_token, input_ids) , dim=1 )
lowercase = self.transformer(inputs_embeds=A__ , labels=A__ , attention_mask=A__ )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def UpperCAmelCase__ (self : Union[str, Any] , A__ : int , A__ : torch.device ) -> torch.Tensor:
        return torch.zeros(A__ , self.prefix_length , dtype=torch.int64 , device=A__ )
def UpperCAmelCase__ (self : int , A__ : int ) -> Dict:
return self.encode_prefix(A__ )
@torch.no_grad()
def UpperCAmelCase__ (self : Optional[int] , A__ : str , A__ : Optional[Any] , A__ : int ) -> Any:
lowercase = torch.split(A__ , 1 , dim=0 )
lowercase = []
lowercase = []
for feature in features:
lowercase = self.decode_prefix(feature.to(A__ ) ) # back to the clip feature
# Only support beam search for now
lowercase , lowercase = self.generate_beam(
input_embeds=A__ , device=A__ , eos_token_id=A__ )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
lowercase = torch.stack(A__ )
lowercase = torch.stack(A__ )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def UpperCAmelCase__ (self : Any , A__ : int=None , A__ : Union[str, Any]=None , A__ : List[str]=None , A__ : int = 5 , A__ : int = 6_7 , A__ : float = 1.0 , A__ : Optional[int] = None , ) -> List[str]:
lowercase = eos_token_id
lowercase = None
lowercase = None
lowercase = torch.ones(A__ , device=A__ , dtype=torch.int )
lowercase = torch.zeros(A__ , device=A__ , dtype=torch.bool )
if input_embeds is not None:
lowercase = input_embeds
else:
lowercase = self.transformer.transformer.wte(A__ )
for i in range(A__ ):
lowercase = self.transformer(inputs_embeds=A__ )
lowercase = outputs.logits
lowercase = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
lowercase = logits.softmax(-1 ).log()
if scores is None:
lowercase , lowercase = logits.topk(A__ , -1 )
lowercase = generated.expand(A__ , *generated.shape[1:] )
lowercase , lowercase = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
lowercase = next_tokens
else:
lowercase = tokens.expand(A__ , *tokens.shape[1:] )
lowercase = torch.cat((tokens, next_tokens) , dim=1 )
else:
lowercase = -float(np.inf )
lowercase = 0
lowercase = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
lowercase = scores_sum / seq_lengths[:, None]
lowercase , lowercase = scores_sum_average.view(-1 ).topk(A__ , -1 )
lowercase = next_tokens // scores_sum.shape[1]
lowercase = seq_lengths[next_tokens_source]
lowercase = next_tokens % scores_sum.shape[1]
lowercase = next_tokens.unsqueeze(1 )
lowercase = tokens[next_tokens_source]
lowercase = torch.cat((tokens, next_tokens) , dim=1 )
lowercase = generated[next_tokens_source]
lowercase = scores_sum_average * seq_lengths
lowercase = is_stopped[next_tokens_source]
lowercase = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
lowercase = torch.cat((generated, next_token_embed) , dim=1 )
lowercase = is_stopped + next_tokens.eq(A__ ).squeeze()
if is_stopped.all():
break
lowercase = scores / seq_lengths
lowercase = scores.argsort(descending=A__ )
# tokens tensors are already padded to max_seq_length
lowercase = [tokens[i] for i in order]
lowercase = torch.stack(A__ , dim=0 )
lowercase = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths
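# Minimal sketch of the length-normalised beam bookkeeping used in generate_beam
# above: cumulative log-probs are divided by sequence length before topk, and the
# flat index is split back into (beam, token). Shapes here are illustrative only.
import torch

beam_size = 3
scores_sum = torch.tensor([[-1.2, -3.0, -0.7], [-2.0, -0.5, -4.0]])  # (beams, vocab)
seq_lengths = torch.tensor([4.0, 5.0])
scores_sum_average = scores_sum / seq_lengths[:, None]
best_scores, flat_idx = scores_sum_average.view(-1).topk(beam_size, -1)
beam_idx = flat_idx // scores_sum.shape[1]   # which beam each candidate extends
token_idx = flat_idx % scores_sum.shape[1]   # which token extends it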
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class UpperCAmelCase ( unittest.TestCase ):
def __init__(self : Tuple , A__ : int , A__ : Any=7 , A__ : str=3 , A__ : Dict=1_8 , A__ : Union[str, Any]=3_0 , A__ : List[Any]=4_0_0 , A__ : Dict=True , A__ : Union[str, Any]=None , A__ : Dict=True , A__ : int=None , A__ : int=True , A__ : Union[str, Any]=[0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] , A__ : Optional[int]=[0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] , A__ : int=True , ) -> List[str]:
lowercase = size if size is not None else {"height": 2_2_4, "width": 2_2_4}
lowercase = crop_size if crop_size is not None else {"height": 1_8, "width": 1_8}
lowercase = parent
lowercase = batch_size
lowercase = num_channels
lowercase = image_size
lowercase = min_resolution
lowercase = max_resolution
lowercase = do_resize
lowercase = size
lowercase = do_center_crop
lowercase = crop_size
lowercase = do_normalize
lowercase = image_mean
lowercase = image_std
lowercase = do_convert_rgb
def UpperCAmelCase__ (self : str ) -> List[str]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
def UpperCAmelCase__ (self : Any , A__ : List[str]=False , A__ : Union[str, Any]=False , A__ : int=False ) -> str:
assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
if equal_resolution:
lowercase = []
for i in range(self.batch_size ):
image_inputs.append(
np.random.randint(
2_5_5 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uint8 ) )
else:
lowercase = []
for i in range(self.batch_size ):
lowercase , lowercase = np.random.choice(np.arange(self.min_resolution , self.max_resolution ) , 2 )
image_inputs.append(np.random.randint(2_5_5 , size=(self.num_channels, width, height) , dtype=np.uint8 ) )
if not numpify and not torchify:
# PIL expects the channel dimension as last dimension
lowercase = [Image.fromarray(np.moveaxis(A__ , 0 , -1 ) ) for x in image_inputs]
if torchify:
lowercase = [torch.from_numpy(A__ ) for x in image_inputs]
return image_inputs
@require_torch
@require_vision
class UpperCAmelCase ( _lowercase , unittest.TestCase ):
UpperCAmelCase : Optional[int] = ChineseCLIPImageProcessor if is_vision_available() else None
def UpperCAmelCase__ (self : Tuple ) -> Any:
lowercase = ChineseCLIPImageProcessingTester(self , do_center_crop=A__ )
@property
def UpperCAmelCase__ (self : Union[str, Any] ) -> List[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase__ (self : List[str] ) -> Dict:
lowercase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A__ , "do_resize" ) )
self.assertTrue(hasattr(A__ , "size" ) )
self.assertTrue(hasattr(A__ , "do_center_crop" ) )
self.assertTrue(hasattr(A__ , "center_crop" ) )
self.assertTrue(hasattr(A__ , "do_normalize" ) )
self.assertTrue(hasattr(A__ , "image_mean" ) )
self.assertTrue(hasattr(A__ , "image_std" ) )
self.assertTrue(hasattr(A__ , "do_convert_rgb" ) )
def UpperCAmelCase__ (self : Optional[int] ) -> Any:
lowercase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 2_2_4, "width": 2_2_4} )
self.assertEqual(image_processor.crop_size , {"height": 1_8, "width": 1_8} )
lowercase = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
self.assertEqual(image_processor.size , {"shortest_edge": 4_2} )
self.assertEqual(image_processor.crop_size , {"height": 8_4, "width": 8_4} )
def UpperCAmelCase__ (self : str ) -> Union[str, Any]:
pass
def UpperCAmelCase__ (self : int ) -> Optional[int]:
# Initialize image_processing
lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase = self.image_processor_tester.prepare_inputs(equal_resolution=A__ )
for image in image_inputs:
self.assertIsInstance(A__ , Image.Image )
# Test not batched input
lowercase = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowercase = image_processing(A__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def UpperCAmelCase__ (self : Optional[int] ) -> Optional[int]:
# Initialize image_processing
lowercase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase = self.image_processor_tester.prepare_inputs(equal_resolution=A__ , numpify=A__ )
for image in image_inputs:
self.assertIsInstance(A__ , np.ndarray )
# Test not batched input
lowercase = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowercase = image_processing(A__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def UpperCAmelCase__ (self : Tuple ) -> Tuple:
# Initialize image_processing
lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase = self.image_processor_tester.prepare_inputs(equal_resolution=A__ , torchify=A__ )
for image in image_inputs:
self.assertIsInstance(A__ , torch.Tensor )
# Test not batched input
lowercase = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowercase = image_processing(A__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
@require_torch
@require_vision
class UpperCAmelCase ( _lowercase , unittest.TestCase ):
UpperCAmelCase : Dict = ChineseCLIPImageProcessor if is_vision_available() else None
def UpperCAmelCase__ (self : Union[str, Any] ) -> Any:
lowercase = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=A__ )
lowercase = 3
@property
def UpperCAmelCase__ (self : Any ) -> Optional[int]:
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase__ (self : List[str] ) -> Tuple:
lowercase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A__ , "do_resize" ) )
self.assertTrue(hasattr(A__ , "size" ) )
self.assertTrue(hasattr(A__ , "do_center_crop" ) )
self.assertTrue(hasattr(A__ , "center_crop" ) )
self.assertTrue(hasattr(A__ , "do_normalize" ) )
self.assertTrue(hasattr(A__ , "image_mean" ) )
self.assertTrue(hasattr(A__ , "image_std" ) )
self.assertTrue(hasattr(A__ , "do_convert_rgb" ) )
def UpperCAmelCase__ (self : List[Any] ) -> str:
pass
def UpperCAmelCase__ (self : Dict ) -> Tuple:
# Initialize image_processing
lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase = self.image_processor_tester.prepare_inputs(equal_resolution=A__ )
for image in image_inputs:
self.assertIsInstance(A__ , Image.Image )
# Test not batched input
lowercase = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowercase = image_processing(A__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
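# Hedged sketch of what the shape assertions above check: the processor returns
# pixel_values of shape (batch, channels, crop_h, crop_w). Constructor arguments
# below are assumptions about reasonable values, not taken from this file.
import numpy as np
from PIL import Image
from transformers import ChineseCLIPImageProcessor

proc = ChineseCLIPImageProcessor(do_center_crop=True, crop_size={"height": 18, "width": 18})
img = Image.fromarray(np.random.randint(0, 255, (32, 32, 3), dtype=np.uint8))
print(proc(img, return_tensors="pt").pixel_values.shape)  # expected: torch.Size([1, 3, 18, 18])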
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}


def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Order vertices by finish time (first DFS pass of Kosaraju's algorithm)."""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order


def find_components(reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Collect every vertex reachable from `vert` in the reversed graph."""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component


def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    """Kosaraju's algorithm: two DFS passes yield the strongly connected components."""
    visited = len(graph) * [False]
    reversed_graph = {vert: [] for vert in range(len(graph))}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)

    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)

    components_list = []
    visited = len(graph) * [False]
    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)
    return components_list
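# Quick check on the sample graphs above (component order depends on traversal):
print(strongly_connected_components(test_graph_1))  # e.g. [[0, 1, 2], [3], [4]]
print(strongly_connected_components(test_graph_2))  # e.g. [[0, 2, 1], [3, 5, 4]]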
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
    _serve_dependencies_installed = True
except (ImportError, AttributeError):
    BaseModel = object

    def Body(*x, **y):
        pass

    _serve_dependencies_installed = False

logger = logging.get_logger('transformers-cli/serving')
def serve_command_factory(args: Namespace):
    """Factory function used to instantiate the serving server from provided command line arguments."""
    nlp = pipeline(
        task=args.task, model=args.model if args.model else None, config=args.config, tokenizer=args.tokenizer, device=args.device
    )
    return ServeCommand(nlp, args.host, args.port, args.workers)
class ServeModelInfoResult(BaseModel):
    """Expose model information"""
    infos: dict

class ServeTokenizeResult(BaseModel):
    """Tokenize result model"""
    tokens: List[str]
    tokens_ids: Optional[List[int]]

class ServeDeTokenizeResult(BaseModel):
    """DeTokenize result model"""
    text: str

class ServeForwardResult(BaseModel):
    """Forward result model"""
    output: Any

class ServeCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Register this command to argparse so it's available for the transformers-cli."""
        serve_parser = parser.add_parser(
            'serve', help='CLI tool to run inference requests through REST and GraphQL endpoints.'
        )
        serve_parser.add_argument('--task', type=str, choices=get_supported_tasks(), help='The task to run the pipeline on')
        serve_parser.add_argument('--host', type=str, default='localhost', help='Interface the server will listen on.')
        serve_parser.add_argument('--port', type=int, default=8888, help='Port the serving will listen to.')
        serve_parser.add_argument('--workers', type=int, default=1, help='Number of http workers')
        serve_parser.add_argument('--model', type=str, help='Model\'s name or path to stored model.')
        serve_parser.add_argument('--config', type=str, help='Model\'s config name or path to stored model.')
        serve_parser.add_argument('--tokenizer', type=str, help='Tokenizer name to use.')
        serve_parser.add_argument(
            '--device', type=int, default=-1, help='Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)'
        )
        serve_parser.set_defaults(func=serve_command_factory)
    def __init__(self, pipeline: Pipeline, host: str, port: int, workers: int):
        self._pipeline = pipeline

        self.host = host
        self.port = port
        self.workers = workers

        if not _serve_dependencies_installed:
            raise RuntimeError(
                'Using serve command requires FastAPI and uvicorn. '
                'Please install transformers with [serving]: pip install "transformers[serving]".'
                'Or install FastAPI and uvicorn separately.'
            )
        else:
            logger.info(f"Serving model over {host}:{port}")
            self._app = FastAPI(
                routes=[
                    APIRoute('/', self.model_info, response_model=ServeModelInfoResult, response_class=JSONResponse, methods=['GET']),
                    APIRoute('/tokenize', self.tokenize, response_model=ServeTokenizeResult, response_class=JSONResponse, methods=['POST']),
                    APIRoute('/detokenize', self.detokenize, response_model=ServeDeTokenizeResult, response_class=JSONResponse, methods=['POST']),
                    APIRoute('/forward', self.forward, response_model=ServeForwardResult, response_class=JSONResponse, methods=['POST']),
                ],
                timeout=600,
            )
    def run(self):
        run(self._app, host=self.host, port=self.port, workers=self.workers)

    def model_info(self):
        return ServeModelInfoResult(infos=vars(self._pipeline.model.config))

    def tokenize(self, text_input: str = Body(None, embed=True), return_ids: bool = Body(False, embed=True)):
        try:
            tokens_txt = self._pipeline.tokenizer.tokenize(text_input)

            if return_ids:
                tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt)
                return ServeTokenizeResult(tokens=tokens_txt, tokens_ids=tokens_ids)
            else:
                return ServeTokenizeResult(tokens=tokens_txt)

        except Exception as e:
            raise HTTPException(status_code=500, detail={'model': '', 'error': str(e)})

    def detokenize(
        self,
        tokens_ids: List[int] = Body(None, embed=True),
        skip_special_tokens: bool = Body(False, embed=True),
        cleanup_tokenization_spaces: bool = Body(True, embed=True),
    ):
        try:
            decoded_str = self._pipeline.tokenizer.decode(tokens_ids, skip_special_tokens, cleanup_tokenization_spaces)
            return ServeDeTokenizeResult(model='', text=decoded_str)
        except Exception as e:
            raise HTTPException(status_code=500, detail={'model': '', 'error': str(e)})

    async def forward(self, inputs=Body(None, embed=True)):
        # Check we don't have empty string
        if len(inputs) == 0:
            return ServeForwardResult(output=[], attention=[])

        try:
            # Forward through the model
            output = self._pipeline(inputs)
            return ServeForwardResult(output=output)
        except Exception as e:
            raise HTTPException(500, {'error': str(e)})
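# Hedged usage sketch: start the server with `transformers-cli serve --task ... --port 8888`,
# then call the routes registered above. The JSON field names mirror the Body
# parameters of `tokenize` and are assumptions here.
import requests

resp = requests.post(
    "http://localhost:8888/tokenize",
    json={"text_input": "Hello world", "return_ids": True},
)
print(resp.json())  # {"tokens": [...], "tokens_ids": [...]}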
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
__UpperCAmelCase = None
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
__UpperCAmelCase = {
'''vocab_file''': {
'''facebook/mbart-large-en-ro''': (
'''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'''
),
'''facebook/mbart-large-cc25''': (
'''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/mbart-large-en-ro''': '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json''',
'''facebook/mbart-large-cc25''': '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json''',
},
}
__UpperCAmelCase = {
'''facebook/mbart-large-en-ro''': 1_024,
'''facebook/mbart-large-cc25''': 1_024,
}
# fmt: off
__UpperCAmelCase = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''']
class lowerCAmelCase_ ( a__ ):
UpperCAmelCase__ : List[Any] = VOCAB_FILES_NAMES
UpperCAmelCase__ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ : Any = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : Tuple = ["input_ids", "attention_mask"]
UpperCAmelCase__ : Any = MBartTokenizer
UpperCAmelCase__ : List[int] = []
UpperCAmelCase__ : List[int] = []
def __init__( self, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_="<s>", SCREAMING_SNAKE_CASE_="</s>", SCREAMING_SNAKE_CASE_="</s>", SCREAMING_SNAKE_CASE_="<s>", SCREAMING_SNAKE_CASE_="<unk>", SCREAMING_SNAKE_CASE_="<pad>", SCREAMING_SNAKE_CASE_="<mask>", SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, **SCREAMING_SNAKE_CASE_, ) -> List[str]:
# Mask token behave like a normal word, i.e. include the space before it
UpperCamelCase : str = AddedToken(SCREAMING_SNAKE_CASE_, lstrip=SCREAMING_SNAKE_CASE_, rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) else mask_token
super().__init__(
vocab_file=SCREAMING_SNAKE_CASE_, tokenizer_file=SCREAMING_SNAKE_CASE_, bos_token=SCREAMING_SNAKE_CASE_, eos_token=SCREAMING_SNAKE_CASE_, sep_token=SCREAMING_SNAKE_CASE_, cls_token=SCREAMING_SNAKE_CASE_, unk_token=SCREAMING_SNAKE_CASE_, pad_token=SCREAMING_SNAKE_CASE_, mask_token=SCREAMING_SNAKE_CASE_, src_lang=SCREAMING_SNAKE_CASE_, tgt_lang=SCREAMING_SNAKE_CASE_, additional_special_tokens=SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_, )
UpperCamelCase : Union[str, Any] = vocab_file
UpperCamelCase : List[Any] = False if not self.vocab_file else True
UpperCamelCase : Optional[Any] = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} )
UpperCamelCase : Tuple = {
lang_code: self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
UpperCamelCase : Any = src_lang if src_lang is not None else 'en_XX'
UpperCamelCase : Union[str, Any] = self.convert_tokens_to_ids(self._src_lang )
UpperCamelCase : List[Any] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def snake_case_ ( self ) -> str:
return self._src_lang
@src_lang.setter
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> None:
UpperCamelCase : List[str] = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
UpperCamelCase : Union[str, Any] = [self.sep_token_id]
UpperCamelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
UpperCamelCase : List[Any] = src_lang
UpperCamelCase : List[str] = self(SCREAMING_SNAKE_CASE_, add_special_tokens=SCREAMING_SNAKE_CASE_, return_tensors=SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Any = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Any = tgt_lang_id
return inputs
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = "en_XX", SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = "ro_RO", **SCREAMING_SNAKE_CASE_, ) -> BatchEncoding:
UpperCamelCase : Tuple = src_lang
UpperCamelCase : Union[str, Any] = tgt_lang
        return super().prepare_seq2seq_batch(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ) -> Any:
return self.set_src_lang_special_tokens(self.src_lang )
def snake_case_ ( self ) -> Dict:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> None:
UpperCamelCase : str = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[Any] = []
UpperCamelCase : str = [self.eos_token_id, self.cur_lang_code]
UpperCamelCase : Dict = self.convert_ids_to_tokens(self.prefix_tokens )
UpperCamelCase : List[str] = self.convert_ids_to_tokens(self.suffix_tokens )
UpperCamelCase : Union[str, Any] = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str, pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens ) ), )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> None:
UpperCamelCase : Optional[Any] = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = []
UpperCamelCase : List[str] = [self.eos_token_id, self.cur_lang_code]
UpperCamelCase : List[str] = self.convert_ids_to_tokens(self.prefix_tokens )
UpperCamelCase : Optional[int] = self.convert_ids_to_tokens(self.suffix_tokens )
UpperCamelCase : Any = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str, pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens ) ), )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(SCREAMING_SNAKE_CASE_ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory.""" )
return
UpperCamelCase : Optional[Any] = os.path.join(
SCREAMING_SNAKE_CASE_, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE_ ):
copyfile(self.vocab_file, SCREAMING_SNAKE_CASE_ )
return (out_vocab_file,)
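# Hedged usage sketch for the fast mBART tokenizer above; the checkpoint comes
# from the PRETRAINED maps and the language codes from FAIRSEQ_LANGUAGE_CODES.
from transformers import MBartTokenizerFast

tok = MBartTokenizerFast.from_pretrained(
    "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
)
batch = tok("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
# Per set_src_lang_special_tokens above, the sequence is suffixed with [</s>, en_XX].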
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, Wav2Vec2Config, Wav2Vec2FeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class Wav2Vec2FeatureExtractionTester(unittest.TestCase):
def __init__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=7, SCREAMING_SNAKE_CASE_=400, SCREAMING_SNAKE_CASE_=2000, SCREAMING_SNAKE_CASE_=1, SCREAMING_SNAKE_CASE_=0.0, SCREAMING_SNAKE_CASE_=1_6000, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=True, ) -> List[str]:
UpperCamelCase : Dict = parent
UpperCamelCase : Dict = batch_size
UpperCamelCase : Any = min_seq_length
UpperCamelCase : Optional[int] = max_seq_length
UpperCamelCase : Optional[int] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
UpperCamelCase : Tuple = feature_size
UpperCamelCase : Any = padding_value
UpperCamelCase : Tuple = sampling_rate
UpperCamelCase : Optional[Any] = return_attention_mask
UpperCamelCase : Optional[Any] = do_normalize
def snake_case_ ( self ) -> Union[str, Any]:
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def snake_case_ ( self, SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_=False ) -> Union[str, Any]:
def _flatten(SCREAMING_SNAKE_CASE_ ):
return list(itertools.chain(*SCREAMING_SNAKE_CASE_ ) )
if equal_length:
UpperCamelCase : List[str] = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
UpperCamelCase : Union[str, Any] = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff )
]
if numpify:
UpperCamelCase : str = [np.asarray(SCREAMING_SNAKE_CASE_ ) for x in speech_inputs]
return speech_inputs
class Wav2Vec2FeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = Wav2Vec2FeatureExtractor

    def setUp(self):
        self.feat_extract_tester = Wav2Vec2FeatureExtractionTester(self)
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
self.assertTrue(np.all(np.mean(SCREAMING_SNAKE_CASE_, axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(SCREAMING_SNAKE_CASE_, axis=0 ) - 1 ) < 1e-3 ) )
def snake_case_ ( self ) -> Optional[int]:
# Tests that all call wrap to encode_plus and batch_encode_plus
UpperCamelCase : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
UpperCamelCase : Any = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase : Dict = [np.asarray(SCREAMING_SNAKE_CASE_ ) for speech_input in speech_inputs]
# Test not batched input
UpperCamelCase : List[Any] = feat_extract(speech_inputs[0], return_tensors='np' ).input_values
UpperCamelCase : Union[str, Any] = feat_extract(np_speech_inputs[0], return_tensors='np' ).input_values
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, atol=1e-3 ) )
# Test batched
UpperCamelCase : List[Any] = feat_extract(SCREAMING_SNAKE_CASE_, return_tensors='np' ).input_values
UpperCamelCase : int = feat_extract(SCREAMING_SNAKE_CASE_, return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
UpperCamelCase : Tuple = [floats_list((1, x) )[0] for x in (800, 800, 800)]
UpperCamelCase : Optional[int] = np.asarray(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = feat_extract(SCREAMING_SNAKE_CASE_, return_tensors='np' ).input_values
UpperCamelCase : Dict = feat_extract(SCREAMING_SNAKE_CASE_, return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, atol=1e-3 ) )
def snake_case_ ( self ) -> int:
UpperCamelCase : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase : Dict = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase : str = ['longest', 'max_length', 'do_not_pad']
UpperCamelCase : Any = [None, 1600, None]
for max_length, padding in zip(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Optional[Any] = feat_extract(SCREAMING_SNAKE_CASE_, padding=SCREAMING_SNAKE_CASE_, max_length=SCREAMING_SNAKE_CASE_, return_tensors='np' )
UpperCamelCase : Tuple = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self.assertTrue(input_values[0][800:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self.assertTrue(input_values[0][1000:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def snake_case_ ( self ) -> Tuple:
UpperCamelCase : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase : Tuple = range(800, 1400, 200 )
UpperCamelCase : str = [floats_list((1, x) )[0] for x in lengths]
UpperCamelCase : int = ['longest', 'max_length', 'do_not_pad']
UpperCamelCase : List[str] = [None, 1600, None]
for max_length, padding in zip(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Tuple = feat_extract(SCREAMING_SNAKE_CASE_, max_length=SCREAMING_SNAKE_CASE_, padding=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def snake_case_ ( self ) -> Optional[Any]:
UpperCamelCase : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase : Optional[int] = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase : int = feat_extract(
SCREAMING_SNAKE_CASE_, truncation=SCREAMING_SNAKE_CASE_, max_length=1000, padding='max_length', return_tensors='np' )
UpperCamelCase : Tuple = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def snake_case_ ( self ) -> List[Any]:
UpperCamelCase : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase : Union[str, Any] = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase : Any = feat_extract(
SCREAMING_SNAKE_CASE_, truncation=SCREAMING_SNAKE_CASE_, max_length=1000, padding='longest', return_tensors='np' )
UpperCamelCase : Dict = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1000) )
UpperCamelCase : str = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase : Any = feat_extract(
SCREAMING_SNAKE_CASE_, truncation=SCREAMING_SNAKE_CASE_, max_length=2000, padding='longest', return_tensors='np' )
UpperCamelCase : int = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1200) )
@require_torch
def snake_case_ ( self ) -> str:
import torch
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float32)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{'input_values': inputs}], return_tensors='np')
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{'input_values': inputs}], return_tensors='pt')
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)
@slow
@require_torch
def snake_case_ ( self ) -> Tuple:
# this test makes sure that models that are using
# group norm don't have their feature extractor return the
# attention_mask
for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
            config = Wav2Vec2Config.from_pretrained(model_id)
            feat_extract = Wav2Vec2FeatureExtractor.from_pretrained(model_id)
# only "layer" feature extraction norm should make use of
# attention_mask
self.assertEqual(feat_extract.return_attention_mask, config.feat_extract_norm == 'layer' )
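# Minimal sketch of the zero-mean / unit-variance property these tests assert,
# mirroring _check_zero_mean_unit_variance above.
import numpy as np

x = np.random.rand(800).astype(np.float32)
norm = (x - x.mean()) / np.sqrt(x.var() + 1e-7)
print(abs(norm.mean()) < 1e-3, abs(norm.var() - 1) < 1e-3)  # True True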
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
    from ..t5.tokenization_t5 import T5Tokenizer
else:
    from ...utils.dummy_sentencepiece_objects import T5Tokenizer

MT5Tokenizer = T5Tokenizer

if is_tokenizers_available():
    from ..t5.tokenization_t5_fast import T5TokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import T5TokenizerFast

MT5TokenizerFast = T5TokenizerFast

_import_structure = {'''configuration_mt5''': ['''MT5Config''', '''MT5OnnxConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_mt5'''] = [
'''MT5EncoderModel''',
'''MT5ForConditionalGeneration''',
'''MT5ForQuestionAnswering''',
'''MT5Model''',
'''MT5PreTrainedModel''',
'''MT5Stack''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_mt5'''] = ['''TFMT5EncoderModel''', '''TFMT5ForConditionalGeneration''', '''TFMT5Model''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_mt5'''] = ['''FlaxMT5EncoderModel''', '''FlaxMT5ForConditionalGeneration''', '''FlaxMT5Model''']
if TYPE_CHECKING:
    from .configuration_mt5 import MT5Config, MT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mt5 import (
            MT5EncoderModel,
            MT5ForConditionalGeneration,
            MT5ForQuestionAnswering,
            MT5Model,
            MT5PreTrainedModel,
            MT5Stack,
        )
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_mt5 import TFMT5EncoderModel, TFMT5ForConditionalGeneration, TFMT5Model
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_mt5 import FlaxMT5EncoderModel, FlaxMT5ForConditionalGeneration, FlaxMT5Model
else:
import sys
    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()['''__file__'''],
        _import_structure,
        extra_objects={'''MT5Tokenizer''': MT5Tokenizer, '''MT5TokenizerFast''': MT5TokenizerFast},
        module_spec=__spec__,
    )
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = '''Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine'''


def get_user_input():
    compute_environment = _ask_options(
        'In which compute environment are you running?',
        ['This machine', 'AWS (Amazon SageMaker)'],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config


def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser('config', description=description)
    else:
        parser = argparse.ArgumentParser('Accelerate config command', description=description)

    parser.add_argument(
        '--config_file',
        default=None,
        help=(
            'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
            'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
            'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
            'with \'huggingface\'.'
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser


def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith('.json'):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(F'accelerate configuration saved at {config_file}')


def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)
if __name__ == "__main__":
main()
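# Hedged usage note: the command is interactive, e.g.
#   $ accelerate config                            # answer prompts; saves default_config.yaml
#   $ accelerate config --config_file ./cfg.yaml   # save the answers to a custom path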
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_import_structure = {
"""configuration_ernie""": ["""ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ErnieConfig""", """ErnieOnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_ernie"""] = [
"""ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ErnieForCausalLM""",
"""ErnieForMaskedLM""",
"""ErnieForMultipleChoice""",
"""ErnieForNextSentencePrediction""",
"""ErnieForPreTraining""",
"""ErnieForQuestionAnswering""",
"""ErnieForSequenceClassification""",
"""ErnieForTokenClassification""",
"""ErnieModel""",
"""ErniePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 678
|
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'susnato/ernie-m-base_pytorch': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json',
'susnato/ernie-m-large_pytorch': 'https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json',
}
class ErnieMConfig(PretrainedConfig):
    model_type = "ernie_m"
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__(
        self,
        vocab_size: int = 250002,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 514,
        initializer_range: float = 0.02,
        pad_token_id: int = 1,
        layer_norm_eps: float = 1e-05,
        classifier_dropout=None,
        is_decoder=False,
        act_dropout=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
| 486
| 0
|
'''simple docstring'''
import numpy
class TwoHiddenLayerNeuralNetwork:
    def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> None:
        self.input_array = input_array
        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.
        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1], 4
        )
        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(4, 3)
        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)
        # Real output values provided.
        self.output_array = output_array
        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)

    def feedforward(self) -> numpy.ndarray:
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )
        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return self.layer_between_second_hidden_layer_and_output

    def back_propagation(self) -> None:
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(
                self.layer_between_first_hidden_layer_and_second_hidden_layer
            ),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(
                    self.layer_between_first_hidden_layer_and_second_hidden_layer
                ),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )

        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )

    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"Iteration {iteration} Loss: {loss}")

    def predict(self, input_arr: numpy.ndarray) -> int:
        # Input values for which the prediction is to be made.
        self.array = input_arr
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return int(self.layer_between_second_hidden_layer_and_output > 0.6)


def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    return (value) * (1 - (value))


def example() -> int:
    # Input values.
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )
    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)
    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(
        input_array=test_input, output_array=output
    )
    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)
    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))


if __name__ == "__main__":
    example()
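    # Illustrative sanity checks for the activation helpers: sigmoid(0) is
    # exactly 0.5, and value * (1 - value) evaluated at 0.5 is exactly 0.25.
    assert sigmoid(numpy.array(0.0)) == 0.5
    assert sigmoid_derivative(numpy.array(0.5)) == 0.25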
| 700
|
'''simple docstring'''
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()

# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)

# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    "vocab_size": len(tokenizer),
    "scale_attn_by_inverse_layer_idx": True,
    "reorder_and_upcast_attn": True,
}

# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)

# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
| 305
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_mctct': ['MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MCTCTConfig'],
'feature_extraction_mctct': ['MCTCTFeatureExtractor'],
'processing_mctct': ['MCTCTProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mctct"] = [
'MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MCTCTForCTC',
'MCTCTModel',
'MCTCTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 560
|
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class SpeechT5Processor(ProcessorMixin):
    feature_extractor_class = "SpeechT5FeatureExtractor"
    tokenizer_class = "SpeechT5Tokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, *args, **kwargs):
        audio = kwargs.pop("audio", None)
        text = kwargs.pop("text", None)
        text_target = kwargs.pop("text_target", None)
        audio_target = kwargs.pop("audio_target", None)
        sampling_rate = kwargs.pop("sampling_rate", None)

        if audio is not None and text is not None:
            raise ValueError(
                "Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?"
            )
        if audio_target is not None and text_target is not None:
            raise ValueError(
                "Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?"
            )
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                "You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process."
            )

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        elif text is not None:
            inputs = self.tokenizer(text, **kwargs)
        else:
            inputs = None

        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target, *args, sampling_rate=sampling_rate, **kwargs)
            labels = targets["input_values"]
        elif text_target is not None:
            targets = self.tokenizer(text_target, **kwargs)
            labels = targets["input_ids"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def pad(self, *args, **kwargs):
        input_values = kwargs.pop("input_values", None)
        input_ids = kwargs.pop("input_ids", None)
        labels = kwargs.pop("labels", None)

        if input_values is not None and input_ids is not None:
            raise ValueError("Cannot process both `input_values` and `input_ids` inputs.")
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded."
            )

        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values, *args, **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids, **kwargs)
        else:
            inputs = None

        if labels is not None:
            if "input_ids" in labels or (isinstance(labels, list) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels, **kwargs)
                labels = targets["input_ids"]
            else:
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels, *args, **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets["input_values"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
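# Illustrative usage sketch; the checkpoint name is an assumption for the
# example (any SpeechT5 checkpoint that ships a processor works):
#
#     from transformers import SpeechT5Processor
#     processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
#     inputs = processor(text="Hello, my dog is cute", return_tensors="pt")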
| 560
| 1
|
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")

mock_tokenizer_config = {"target_lang": "fi", "source_lang": "en"}
zh_code = ">>zh<<"
ORG_NAME = "Helsinki-NLP/"

if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"


@require_sentencepiece
class MarianTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab"])
        save_json(mock_tokenizer_config, save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"])
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["source_spm"])
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["target_spm"])

        tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs) -> MarianTokenizer:
        return MarianTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )
    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 9)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 9)

    def test_tokenizer_equivalence_en_de(self):
        en_de_tokenizer = MarianTokenizer.from_pretrained(f"{ORG_NAME}opus-mt-en-de")
        batch = en_de_tokenizer(["I am a small frog"], return_tensors=None)
        self.assertIsInstance(batch, BatchEncoding)
        expected = [38, 121, 14, 697, 38848, 0]
        self.assertListEqual(expected, batch.input_ids[0])

        save_dir = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(save_dir)
        contents = [x.name for x in Path(save_dir).glob("*")]
        self.assertIn("source.spm", contents)
        MarianTokenizer.from_pretrained(save_dir)

    def test_outputs_not_longer_than_maxlen(self):
        tok = self.get_tokenizer()
        batch = tok(
            ["I am a small frog" * 1000, "I am a small frog"],
            padding=True,
            truncation=True,
            return_tensors=FRAMEWORK,
        )
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual(batch.input_ids.shape, (2, 512))

    def test_outputs_can_be_shorter(self):
        tok = self.get_tokenizer()
        batch_smaller = tok(
            ["I am a tiny frog", "I am a small frog"], padding=True, return_tensors=FRAMEWORK
        )
        self.assertIsInstance(batch_smaller, BatchEncoding)
        self.assertEqual(batch_smaller.input_ids.shape, (2, 10))
@slow
def A ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ = {"""input_ids""": [[4_3_4_9_5, 4_6_2, 2_0, 4_2_1_6_4, 1_3_6_9, 5_2, 4_6_4, 1_3_2, 1_7_0_3, 4_9_2, 1_3, 7_4_9_1, 3_8_9_9_9, 6, 8, 4_6_4, 1_3_2, 1_7_0_3, 4_9_2, 1_3, 4_6_6_9, 3_7_8_6_7, 1_3, 7_5_2_5, 2_7, 1_5_9_3, 9_8_8, 1_3, 3_3_9_7_2, 7_0_2_9, 6, 2_0, 8_2_5_1, 3_8_3, 2, 2_7_0, 5_8_6_6, 3_7_8_8, 2, 2_3_5_3, 8_2_5_1, 1_2_3_3_8, 2, 1_3_9_5_8, 3_8_7, 2, 3_6_2_9, 6_9_5_3, 1_8_8, 2_9_0_0, 2, 1_3_9_5_8, 8_0_1_1, 1_1_5_0_1, 2_3, 8_4_6_0, 4_0_7_3, 3_4_0_0_9, 2_0, 4_3_5, 1_1_4_3_9, 2_7, 8, 8_4_6_0, 4_0_7_3, 6_0_0_4, 2_0, 9_9_8_8, 3_7_5, 2_7, 3_3, 2_6_6, 1_9_4_5, 1_0_7_6, 1_3_5_0, 3_7_8_6_7, 3_2_8_8, 5, 5_7_7, 1_0_7_6, 4_3_7_4, 8, 5_0_8_2, 5, 2_6_4_5_3, 2_5_7, 5_5_6, 4_0_3, 2, 2_4_2, 1_3_2, 3_8_3, 3_1_6, 4_9_2, 8, 1_0_7_6_7, 6, 3_1_6, 3_0_4, 4_2_3_9, 3, 0], [1_4_8, 1_5_7_2_2, 1_9, 1_8_3_9, 1_2, 1_3_5_0, 1_3, 2_2_3_2_7, 5_0_8_2, 5_4_1_8, 4_7_5_6_7, 3_5_9_3_8, 5_9, 3_1_8, 1_9_5_5_2, 1_0_8, 2_1_8_3, 5_4, 1_4_9_7_6, 4_8_3_5, 3_2, 5_4_7, 1_1_1_4, 8, 3_1_5, 2_4_1_7, 5, 9_2, 1_9_0_8_8, 3, 0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0], [3_6, 6_3_9_5, 1_2_5_7_0, 3_9_1_4_7, 1_1_5_9_7, 6, 2_6_6, 4, 4_5_4_0_5, 7_2_9_6, 3, 0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowercase , model_name="""Helsinki-NLP/opus-mt-en-de""" , revision="""1a8c2263da11e68e50938f97e10cd57820bd504c""" , decode_kwargs={"""use_source_tokenizer""": True} , )
    def test_separate_vocabs(self):
        tokenizer = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs")

        source_text = "Tämä on testi"
        target_text = "This is a test"

        expected_src_ids = [76, 7, 2047, 2]
        expected_target_ids = [69, 12, 11, 940, 2]

        src_ids = tokenizer(source_text).input_ids
        self.assertListEqual(expected_src_ids, src_ids)

        target_ids = tokenizer(text_target=target_text).input_ids
        self.assertListEqual(expected_target_ids, target_ids)

        decoded = tokenizer.decode(target_ids, skip_special_tokens=True)
        self.assertEqual(decoded, target_text)
| 707
|
'''simple docstring'''
from collections import defaultdict
def dfs(start: int) -> int:
    """DFS traversal; returns the size of the subtree rooted at start."""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree() -> None:
    dfs(1)


if __name__ == "__main__":
    number_of_nodes, number_of_edges = 10, 9
    tree = defaultdict(list)
    visited: dict[int, bool] = {}
    cuts: list[int] = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
    for u, v in edges:
        tree[u].append(v)
        tree[v].append(u)
    even_tree()
    print(len(cuts) - 1)
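    # Worked by hand: for the sample tree above the program prints 2, since
    # cutting the edges (1, 3) and (1, 6) leaves three components of even size.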
| 265
| 0
|
def is_automorphic_number(number: int) -> bool:
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
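    # Illustrative spot checks: 76**2 == 5776 ends in 76, while 7**2 == 49
    # does not end in 7.
    assert is_automorphic_number(76) is True
    assert is_automorphic_number(7) is False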
| 151
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

NAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "shi-labs/nat-mini-in1k-224": "https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json",
    # See all Nat models at https://huggingface.co/models?filter=nat
}


class NatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "nat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
| 582
| 0
|
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

TARGET_FEATURE_LENGTH = 256
class snake_case_ ( __lowerCamelCase ):
'''simple docstring'''
__UpperCamelCase = ['melgan']
def __init__( self, A_, A_, A_, A_, A_, ) -> Optional[Any]:
super().__init__()
# From MELGAN
UpperCAmelCase__ =math.log(1E-5 ) # Matches MelGAN training.
UpperCAmelCase__ =4.0 # Largest value for most examples
UpperCAmelCase__ =128
self.register_modules(
notes_encoder=UpperCAmelCase_, continuous_encoder=UpperCAmelCase_, decoder=UpperCAmelCase_, scheduler=UpperCAmelCase_, melgan=UpperCAmelCase_, )
def __UpperCAmelCase ( self, A_, A_=(-1.0, 1.0), A_=False ) -> Union[str, Any]:
UpperCAmelCase__ =output_range
if clip:
UpperCAmelCase__ =torch.clip(UpperCAmelCase_, self.min_value, self.max_value )
# Scale to [0, 1].
UpperCAmelCase__ =(features - self.min_value) / (self.max_value - self.min_value)
# Scale to [min_out, max_out].
return zero_one * (max_out - min_out) + min_out
def __UpperCAmelCase ( self, A_, A_=(-1.0, 1.0), A_=False ) -> Optional[Any]:
UpperCAmelCase__ =input_range
UpperCAmelCase__ =torch.clip(UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ ) if clip else outputs
# Scale to [0, 1].
UpperCAmelCase__ =(outputs - min_out) / (max_out - min_out)
# Scale to [self.min_value, self.max_value].
return zero_one * (self.max_value - self.min_value) + self.min_value
def __UpperCAmelCase ( self, A_, A_, A_ ) -> Optional[Any]:
UpperCAmelCase__ =input_tokens > 0
UpperCAmelCase__ =self.notes_encoder(
encoder_input_tokens=UpperCAmelCase_, encoder_inputs_mask=UpperCAmelCase_ )
UpperCAmelCase__ =self.continuous_encoder(
encoder_inputs=UpperCAmelCase_, encoder_inputs_mask=UpperCAmelCase_ )
return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
def __UpperCAmelCase ( self, A_, A_, A_ ) -> Any:
UpperCAmelCase__ =noise_time
if not torch.is_tensor(UpperCAmelCase_ ):
UpperCAmelCase__ =torch.tensor([timesteps], dtype=torch.long, device=input_tokens.device )
elif torch.is_tensor(UpperCAmelCase_ ) and len(timesteps.shape ) == 0:
UpperCAmelCase__ =timesteps[None].to(input_tokens.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
UpperCAmelCase__ =timesteps * torch.ones(input_tokens.shape[0], dtype=timesteps.dtype, device=timesteps.device )
UpperCAmelCase__ =self.decoder(
encodings_and_masks=UpperCAmelCase_, decoder_input_tokens=UpperCAmelCase_, decoder_noise_time=UpperCAmelCase_ )
return logits
@torch.no_grad()
def __call__( self, A_, A_ = None, A_ = 100, A_ = True, A_ = "numpy", A_ = None, A_ = 1, ) -> Any:
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(UpperCAmelCase_, UpperCAmelCase_ ) or callback_steps <= 0)
):
raise ValueError(
f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
f""" {type(UpperCAmelCase_ )}.""" )
UpperCAmelCase__ =np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims], dtype=np.floataa )
UpperCAmelCase__ =np.zeros([1, 0, self.n_dims], np.floataa )
UpperCAmelCase__ =torch.ones((1, TARGET_FEATURE_LENGTH), dtype=UpperCAmelCase_, device=self.device )
for i, encoder_input_tokens in enumerate(UpperCAmelCase_ ):
if i == 0:
UpperCAmelCase__ =torch.from_numpy(pred_mel[:1].copy() ).to(
device=self.device, dtype=self.decoder.dtype )
# The first chunk has no previous context.
UpperCAmelCase__ =torch.zeros((1, TARGET_FEATURE_LENGTH), dtype=UpperCAmelCase_, device=self.device )
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
UpperCAmelCase__ =ones
UpperCAmelCase__ =self.scale_features(
UpperCAmelCase_, output_range=[-1.0, 1.0], clip=UpperCAmelCase_ )
UpperCAmelCase__ =self.encode(
input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ), continuous_inputs=UpperCAmelCase_, continuous_mask=UpperCAmelCase_, )
# Sample encoder_continuous_inputs shaped gaussian noise to begin loop
UpperCAmelCase__ =randn_tensor(
shape=encoder_continuous_inputs.shape, generator=UpperCAmelCase_, device=self.device, dtype=self.decoder.dtype, )
# set step values
self.scheduler.set_timesteps(UpperCAmelCase_ )
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
UpperCAmelCase__ =self.decode(
encodings_and_masks=UpperCAmelCase_, input_tokens=UpperCAmelCase_, noise_time=t / self.scheduler.config.num_train_timesteps, )
# Compute previous output: x_t -> x_t-1
UpperCAmelCase__ =self.scheduler.step(UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, generator=UpperCAmelCase_ ).prev_sample
UpperCAmelCase__ =self.scale_to_features(UpperCAmelCase_, input_range=[-1.0, 1.0] )
UpperCAmelCase__ =mel[:1]
UpperCAmelCase__ =mel.cpu().float().numpy()
UpperCAmelCase__ =np.concatenate([full_pred_mel, pred_mel[:1]], axis=1 )
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(UpperCAmelCase_, UpperCAmelCase_ )
logger.info("Generated segment", UpperCAmelCase_ )
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
"Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'." )
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
"Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'." )
if output_type == "numpy":
UpperCAmelCase__ =self.melgan(input_features=full_pred_mel.astype(np.floataa ) )
else:
UpperCAmelCase__ =full_pred_mel
if not return_dict:
return (output,)
return AudioPipelineOutput(audios=UpperCAmelCase_ )
| 706
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "vinvino02/glpn-kitti": "https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json",
    # See all GLPN models at https://huggingface.co/models?filter=glpn
}


class GLPNConfig(PretrainedConfig):
    model_type = "glpn"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=64,
        max_depth=10,
        head_in_index=-1,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
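# Standard transformers pattern, sketched for reference (GLPNModel is assumed
# importable from the same library namespace):
#
#     from transformers import GLPNConfig, GLPNModel
#     configuration = GLPNConfig()      # glpn-kitti style defaults
#     model = GLPNModel(configuration)  # randomly initialised weights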
| 510
| 0
|
from __future__ import annotations
UpperCamelCase = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = (
                f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            )
            raise ValueError(msg)
        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"
if __name__ == "__main__":
    g = Graph(graph, "G")
    g.breath_first_search()
    print(g.shortest_path("D"))
    print(g.shortest_path("G"))
    print(g.shortest_path("Foo"))
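# Note: `queue.pop(0)` in breath_first_search is O(n) per pop; for larger
# graphs a collections.deque keeps the traversal O(V + E). Illustrative
# drop-in change (assumes the same attributes as the class above):
#
#     from collections import deque
#     queue = deque([self.source_vertex])
#     ...
#     vertex = queue.popleft()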
| 269
|
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
A_ = "sshleifer/bart-tiny-random"
A_ = "patrickvonplaten/t5-tiny-random"
@require_torch
class snake_case ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _lowercase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
return AutoConfig.from_pretrained(lowerCAmelCase_ )
def _lowercase ( self : Dict ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ ,*SCREAMING_SNAKE_CASE_ = create_student_by_copying_alternating_layers(lowerCAmelCase_ , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.num_hidden_layers , 1 )
def _lowercase ( self : int ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ ,*SCREAMING_SNAKE_CASE_ = create_student_by_copying_alternating_layers(lowerCAmelCase_ , tempfile.mkdtemp() , e=1 , d=lowerCAmelCase_ )
def _lowercase ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ ,*SCREAMING_SNAKE_CASE_ = create_student_by_copying_alternating_layers(lowerCAmelCase_ , tempfile.mkdtemp() , e=1 , d=lowerCAmelCase_ )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )
def _lowercase ( self : Any ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ ,*SCREAMING_SNAKE_CASE_ = create_student_by_copying_alternating_layers(lowerCAmelCase_ , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , 1 )
def _lowercase ( self : List[Any] ) -> Dict:
"""simple docstring"""
with self.assertRaises(lowerCAmelCase_ ):
create_student_by_copying_alternating_layers(lowerCAmelCase_ , tempfile.mkdtemp() , e=lowerCAmelCase_ , d=lowerCAmelCase_ )
| 393
| 0
|
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class AbstractDatasetReader(ABC):
    def __init__(
        self,
        path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths, dict) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        pass


class AbstractDatasetInputStream(ABC):
    def __init__(
        self,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
        pass
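# Minimal sketch of a concrete reader on top of the base classes above; the
# class name and the `Dataset.from_dict` construction are illustrative
# assumptions, not part of this module:
class InMemoryDictReader(AbstractDatasetInputStream):
    def __init__(self, data: dict, **kwargs):
        super().__init__(**kwargs)
        self.data = data

    def read(self) -> Dataset:
        # Builds an in-memory Dataset; streaming/caching options are ignored here.
        return Dataset.from_dict(self.data, features=self.features)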
| 715
|
'''simple docstring'''
def match_pattern(input_string: str, pattern: str) -> bool:
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1

    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern)] for j in range(len_string)]

    # since string of zero length match pattern of zero length
    dp[0][0] = 1

    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0

    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0

    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0

    return bool(dp[-1][-1])
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # inputting the strings
    # input_string = input("input a string :")
    # pattern = input("input a pattern :")
    input_string = "aab"
    pattern = "c*a*b"
    # using function to check whether given string matches the given pattern
    if match_pattern(input_string, pattern):
        print(f"{input_string} matches the given pattern {pattern}")
    else:
        print(f"{input_string} does not match with the given pattern {pattern}")
| 98
| 0
|
"""simple docstring"""
def solution(length: int = 50) -> int:
    ways_number = [1] * (length + 1)

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]

    return ways_number[length]


if __name__ == "__main__":
    print(f"{solution() = }")
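    # Hand-checked small case: a row of length 5 admits 15 tilings with tiles
    # of length one to four.
    assert solution(5) == 15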
| 46
|
'''simple docstring'''
from math import isqrt, log2


def calculate_prime_numbers(max_number: int) -> list[int]:
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 800800, degree: int = 800800) -> int:
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)

    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1

    return hybrid_integers_count


if __name__ == "__main__":
    print(f"{solution() = }")
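    # Small sanity check of the sieve helper:
    assert calculate_prime_numbers(10) == [2, 3, 5, 7]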
| 356
| 0
|
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType(Protocol):
    def process(self, sample: float) -> float:
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")

    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler

    phases = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(phases, -2 * pi))
    plt.show()
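# Minimal filter satisfying the FilterType protocol above, so the two plot
# helpers can be exercised; the one-pole low-pass coefficient (0.5) and the
# 48 kHz sample rate are arbitrary illustrative choices.
class SimpleLowPass:
    def __init__(self) -> None:
        self.prev = 0.0

    def process(self, sample: float) -> float:
        # y[n] = 0.5 * x[n] + 0.5 * y[n - 1]
        self.prev = 0.5 * sample + 0.5 * self.prev
        return self.prev


if __name__ == "__main__":
    show_frequency_response(SimpleLowPass(), 48000)
    show_phase_response(SimpleLowPass(), 48000)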
| 713
|
from itertools import count
def solution(min_block_length: int = 50) -> int:
    fill_count_functions = [1] * min_block_length

    for n in count(min_block_length):
        fill_count_functions.append(1)

        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]

            fill_count_functions[n] += 1

        if fill_count_functions[n] > 1_000_000:
            break

    return n


if __name__ == "__main__":
    print(f"{solution() = }")
| 313
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_jukebox""": [
"""JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""JukeboxConfig""",
"""JukeboxPriorConfig""",
"""JukeboxVQVAEConfig""",
],
"""tokenization_jukebox""": ["""JukeboxTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_jukebox"] = [
"""JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""JukeboxModel""",
"""JukeboxPreTrainedModel""",
"""JukeboxVQVAE""",
"""JukeboxPrior""",
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 33
|
'''simple docstring'''
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BridgeTowerProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs
        )
        encoding.update(encoding_image_processor)
        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
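# Illustrative usage; the checkpoint name and image file are assumptions for
# the example:
#
#     from transformers import BridgeTowerProcessor
#     from PIL import Image
#     processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
#     encoding = processor(
#         images=Image.open("cat.png"), text="a photo of a cat", return_tensors="pt"
#     )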
| 526
| 0
|
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
HIGHLIGHT_MESSAGE_PRE = "<<<<<<< This should probably be modified because it mentions: "

HIGHLIGHT_MESSAGE_POST = "=======\n>>>>>>>\n"

TO_HIGHLIGHT = [
    "TextEncoderConfig",
    "ByteTextEncoder",
    "SubwordTextEncoder",
    "encoder_config",
    "maybe_build_from_corpus",
    "manual_dir",
]

TO_CONVERT = [
    # (pattern, replacement)
    # Order is important here for some replacements
    (r"tfds\.core", r"datasets"),
    (r"tf\.io\.gfile\.GFile", r"open"),
    (r"tf\.([\w\d]+)", r"datasets.Value('\1')"),
    (r"tfds\.features\.Text\(\)", r"datasets.Value('string')"),
    (r"tfds\.features\.Text\(", r"datasets.Value('string'),"),
    (r"features\s*=\s*tfds.features.FeaturesDict\(", r"features=datasets.Features("),
    (r"tfds\.features\.FeaturesDict\(", r"dict("),
    (r"The TensorFlow Datasets Authors", r"The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"),
    (r"tfds\.", r"datasets."),
    (r"dl_manager\.manual_dir", r"self.config.data_dir"),
    (r"self\.builder_config", r"self.config"),
]


def convert_command_factory(args: Namespace):
    """Factory used to build a ConvertCommand from parsed CLI arguments."""
    return ConvertCommand(args.tfds_path, args.datasets_directory)
class ConvertCommand(BaseDatasetsCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            "convert",
            help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.",
        )
        train_parser.add_argument(
            "--tfds_path",
            type=str,
            required=True,
            help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.",
        )
        train_parser.add_argument(
            "--datasets_directory", type=str, required=True, help="Path to the HuggingFace Datasets folder."
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(self, tfds_path: str, datasets_directory: str, *args):
        self._logger = get_logger("datasets-cli/converting")
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory
    def run(self):
        if os.path.isdir(self._tfds_path):
            abs_tfds_path = os.path.abspath(self._tfds_path)
        elif os.path.isfile(self._tfds_path):
            abs_tfds_path = os.path.dirname(self._tfds_path)
        else:
            raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")

        abs_datasets_path = os.path.abspath(self._datasets_directory)

        self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}")

        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}

        if os.path.isdir(self._tfds_path):
            file_names = os.listdir(abs_tfds_path)
        else:
            file_names = [os.path.basename(self._tfds_path)]

        for f_name in file_names:
            self._logger.info(f"Looking at file {f_name}")
            input_file = os.path.join(abs_tfds_path, f_name)
            output_file = os.path.join(abs_datasets_path, f_name)

            if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info("Skipping file")
                continue

            with open(input_file, encoding="utf-8") as f:
                lines = f.readlines()

            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
            for line in lines:
                out_line = line
                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = "import datasets\n"
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ""
                    continue
                elif "from absl import logging" in out_line:
                    out_line = "from datasets import logging\n"
                elif "getLogger" in out_line:
                    out_line = out_line.replace("getLogger", "get_logger")
                elif any(expression in out_line for expression in TO_HIGHLIGHT):
                    needs_manual_update = True
                    to_remove = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove) + "\n")
                    out_lines.append(out_line)
                    out_lines.append(HIGHLIGHT_MESSAGE_POST)
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern, replacement, out_line)

                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)", out_line)
                    tfds_imports.extend(imp.strip() for imp in match.group(1).split(","))
                    out_line = "from . import " + match.group(1)

                # Check we have not forget anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f"Error converting {out_line.strip()}")

                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line)

            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dir_name = f_name.replace(".py", "")
                output_dir = os.path.join(abs_datasets_path, dir_name)
                output_file = os.path.join(output_dir, f_name)
                os.makedirs(output_dir, exist_ok=True)
                self._logger.info(f"Adding directory {output_dir}")
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file)

            if needs_manual_update:
                with_manual_update.append(output_file)

            with open(output_file, "w", encoding="utf-8") as f:
                f.writelines(out_lines)
            self._logger.info(f"Converted in {output_file}")

        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file)
                dest_folder = imports_to_builder_map[f_name.replace(".py", "")]
                self._logger.info(f"Moving {dest_folder} to {utils_file}")
                shutil.copy(utils_file, dest_folder)
            except KeyError:
                self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually.")

        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'."
                )
| 46
|
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , __UpperCamelCase , __UpperCamelCase=2 , __UpperCamelCase=8 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=99 , __UpperCamelCase=16 , __UpperCamelCase=5 , __UpperCamelCase=2 , __UpperCamelCase=36 , __UpperCamelCase="gelu" , __UpperCamelCase=0.0 , __UpperCamelCase=0.0 , __UpperCamelCase=5_12 , __UpperCamelCase=16 , __UpperCamelCase=2 , __UpperCamelCase=0.02 , __UpperCamelCase=3 , __UpperCamelCase=4 , __UpperCamelCase=None , ):
"""simple docstring"""
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = seq_length
snake_case_ = is_training
snake_case_ = use_input_mask
snake_case_ = use_token_type_ids
snake_case_ = use_labels
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = max_position_embeddings
snake_case_ = type_vocab_size
snake_case_ = type_sequence_label_size
snake_case_ = initializer_range
snake_case_ = num_labels
snake_case_ = num_choices
snake_case_ = scope
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ = None
if self.use_input_mask:
snake_case_ = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ = None
if self.use_token_type_ids:
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case_ = None
snake_case_ = None
snake_case_ = None
if self.use_labels:
snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case_ = ids_tensor([self.batch_size] , self.num_choices )
snake_case_ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCAmelCase ( self ):
"""simple docstring"""
return MraConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , )
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = self.get_config()
snake_case_ = 3_00
return config
def __lowerCAmelCase ( self ):
"""simple docstring"""
        snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ = self.prepare_config_and_inputs()
snake_case_ = True
snake_case_ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def __lowerCAmelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
snake_case_ = MraModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
snake_case_ = model(__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase )
snake_case_ = model(__UpperCamelCase , token_type_ids=__UpperCamelCase )
snake_case_ = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ):
"""simple docstring"""
snake_case_ = True
snake_case_ = MraModel(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
snake_case_ = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , encoder_attention_mask=__UpperCamelCase , )
snake_case_ = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , )
snake_case_ = model(__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
snake_case_ = MraForMaskedLM(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
snake_case_ = model(__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
snake_case_ = MraForQuestionAnswering(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
snake_case_ = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , start_positions=__UpperCamelCase , end_positions=__UpperCamelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCAmelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
snake_case_ = self.num_labels
snake_case_ = MraForSequenceClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
snake_case_ = model(__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCAmelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
snake_case_ = self.num_labels
snake_case_ = MraForTokenClassification(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
snake_case_ = model(__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCAmelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
snake_case_ = self.num_choices
snake_case_ = MraForMultipleChoice(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
snake_case_ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case_ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case_ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case_ = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = self.prepare_config_and_inputs()
        snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ = config_and_inputs
snake_case_ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE ( __snake_case , unittest.TestCase ):
"""simple docstring"""
__A = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
__A = False
__A = False
__A = False
__A = False
__A = ()
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = MraModelTester(self )
snake_case_ = ConfigTester(self , config_class=__UpperCamelCase , hidden_size=37 )
def __lowerCAmelCase ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
snake_case_ = type
self.model_tester.create_and_check_model(*__UpperCamelCase )
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__UpperCamelCase )
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__UpperCamelCase )
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__UpperCamelCase )
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__UpperCamelCase )
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__UpperCamelCase )
@slow
def __lowerCAmelCase ( self ):
"""simple docstring"""
for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = MraModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
@unittest.skip(reason='MRA does not output attentions' )
def __lowerCAmelCase ( self ):
"""simple docstring"""
return
@require_torch
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
@slow
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = MraModel.from_pretrained('uw-madison/mra-base-512-4' )
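        # One dummy batch: a single sequence made of the token ids 0..255.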
snake_case_ = torch.arange(2_56 ).unsqueeze(0 )
with torch.no_grad():
snake_case_ = model(__UpperCamelCase )[0]
snake_case_ = torch.Size((1, 2_56, 7_68) )
self.assertEqual(output.shape , __UpperCamelCase )
snake_case_ = torch.tensor(
[[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCamelCase , atol=1E-4 ) )
@slow
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = MraForMaskedLM.from_pretrained('uw-madison/mra-base-512-4' )
snake_case_ = torch.arange(2_56 ).unsqueeze(0 )
with torch.no_grad():
snake_case_ = model(__UpperCamelCase )[0]
snake_case_ = 5_02_65
snake_case_ = torch.Size((1, 2_56, vocab_size) )
self.assertEqual(output.shape , __UpperCamelCase )
snake_case_ = torch.tensor(
[[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCamelCase , atol=1E-4 ) )
@slow
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = MraForMaskedLM.from_pretrained('uw-madison/mra-base-4096-8-d3' )
snake_case_ = torch.arange(40_96 ).unsqueeze(0 )
with torch.no_grad():
snake_case_ = model(__UpperCamelCase )[0]
snake_case_ = 5_02_65
snake_case_ = torch.Size((1, 40_96, vocab_size) )
self.assertEqual(output.shape , __UpperCamelCase )
snake_case_ = torch.tensor(
[[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCamelCase , atol=1E-4 ) )
| 46
| 1
|
from __future__ import annotations
SCREAMING_SNAKE_CASE = list[list[int]]
# assigning initial values to the grid
SCREAMING_SNAKE_CASE = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
SCREAMING_SNAKE_CASE = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
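# A placement is safe only if digit n does not already appear in the row, the
# column, or the 3x3 box containing (row, column).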
def a (lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
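# Scan rows top to bottom and return the coordinates of the first empty cell
# (marked 0), or None when the grid is completely filled.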
def a (lowerCAmelCase__ ):
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
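# Backtracking solver: try digits 1-9 in the next empty cell, recurse, and reset
# the cell to 0 whenever a choice leads to a dead end.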
def a (lowerCAmelCase__ ):
if location := find_empty_location(lowerCAmelCase__ ):
__a , __a = location
else:
# If the location is ``None``, then the grid is solved.
return grid
for digit in range(1 , 10 ):
if is_safe(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
__a = digit
if sudoku(lowerCAmelCase__ ) is not None:
return grid
__a = 0
return None
def a (lowerCAmelCase__ ):
for row in grid:
for cell in row:
print(lowerCAmelCase__ , end=""" """ )
print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('\nExample grid:\n' + '=' * 2_0)
print_solution(example_grid)
print('\nExample grid solution:')
SCREAMING_SNAKE_CASE = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('Cannot find a solution.')
| 99
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
SCREAMING_SNAKE_CASE__:Dict = logging.get_logger(__name__)
class snake_case__ ( snake_case_ ):
def __init__( self , *lowerCamelCase , **lowerCamelCase ):
warnings.warn(
"The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use DonutImageProcessor instead." , lowerCamelCase , )
super().__init__(*lowerCamelCase , **lowerCamelCase )
| 528
| 0
|
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
A = logging.getLogger(__name__)
torch.set_grad_enabled(False)
A = 'cuda' if torch.cuda.is_available() else 'cpu'
def lowerCAmelCase__ ( lowerCamelCase__ , lowerCamelCase__=100 , lowerCamelCase__=" " ) -> List[str]:
A = text.split(lowerCamelCase__ )
return [character.join(text[i : i + n] ).strip() for i in range(0 , len(lowerCamelCase__ ) , lowerCamelCase__ )]
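# Split each document in the batch into 100-word passages, repeating its title
# once per passage.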
def lowerCAmelCase__ ( lowerCamelCase__ ) -> dict:
A , A = [], []
for title, text in zip(documents['title'] , documents['text'] ):
if text is not None:
for passage in split_text(lowerCamelCase__ ):
titles.append(title if title is not None else '' )
texts.append(lowerCamelCase__ )
return {"title": titles, "text": texts}
def lowerCAmelCase__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> dict:
A = ctx_tokenizer(
documents['title'] , documents['text'] , truncation=lowerCamelCase__ , padding='longest' , return_tensors='pt' )['input_ids']
A = ctx_encoder(input_ids.to(device=lowerCamelCase__ ) , return_dict=lowerCamelCase__ ).pooler_output
return {"embeddings": embeddings.detach().cpu().numpy()}
def lowerCAmelCase__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , ) -> List[Any]:
######################################
logger.info('Step 1 - Create the dataset' )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
A = load_dataset(
'csv' , data_files=[rag_example_args.csv_path] , split='train' , delimiter='\t' , column_names=['title', 'text'] )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
A = dataset.map(lowerCamelCase__ , batched=lowerCamelCase__ , num_proc=processing_args.num_proc )
# And compute the embeddings
A = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=lowerCamelCase__ )
A = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
A = Features(
{'text': Value('string' ), 'title': Value('string' ), 'embeddings': Sequence(Value('float32' ) )} ) # optional, save as float32 instead of float64 to save space
A = dataset.map(
partial(lowerCamelCase__ , ctx_encoder=lowerCamelCase__ , ctx_tokenizer=lowerCamelCase__ ) , batched=lowerCamelCase__ , batch_size=processing_args.batch_size , features=lowerCamelCase__ , )
# And finally save your dataset
A = os.path.join(rag_example_args.output_dir , 'my_knowledge_dataset' )
dataset.save_to_disk(lowerCamelCase__ )
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info('Step 2 - Index the dataset' )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
A = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
dataset.add_faiss_index('embeddings' , custom_index=lowerCamelCase__ )
# And save the index
A = os.path.join(rag_example_args.output_dir , 'my_knowledge_dataset_hnsw_index.faiss' )
dataset.get_index('embeddings' ).save(lowerCamelCase__ )
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ : str = field(
default=str(Path(UpperCamelCase ).parent / """test_run""" / """dummy-kb""" / """my_knowledge_dataset.csv""" ) ,metadata={"""help""": """Path to a tab-separated csv file with columns 'title' and 'text'"""} ,)
lowerCAmelCase_ : Optional[str] = field(
default=UpperCamelCase ,metadata={"""help""": """Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."""} ,)
lowerCAmelCase_ : str = field(
default="""facebook/rag-sequence-nq""" ,metadata={"""help""": """The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"""} ,)
lowerCAmelCase_ : str = field(
default="""facebook/dpr-ctx_encoder-multiset-base""" ,metadata={
"""help""": (
"""The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"""
""" 'facebook/dpr-ctx_encoder-multiset-base'"""
)
} ,)
lowerCAmelCase_ : Optional[str] = field(
default=str(Path(UpperCamelCase ).parent / """test_run""" / """dummy-kb""" ) ,metadata={"""help""": """Path to a directory where the dataset passages and the index will be saved"""} ,)
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ : Optional[int] = field(
default=UpperCamelCase ,metadata={
"""help""": """The number of processes to use to split the documents into passages. Default is single process."""
} ,)
lowerCAmelCase_ : int = field(
default=16 ,metadata={
"""help""": """The batch size to use when computing the passages embeddings using the DPR context encoder."""
} ,)
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ : int = field(
default=7_68 ,metadata={"""help""": """The dimension of the embeddings to pass to the HNSW Faiss index."""} ,)
lowerCAmelCase_ : int = field(
default=1_28 ,metadata={
"""help""": (
"""The number of bi-directional links created for every new element during the HNSW index construction."""
)
} ,)
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
A = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
A , A , A = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
A = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
| 109
|
"""simple docstring"""
def lowerCAmelCase__ ( lowerCamelCase__ ) -> int:
if not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
raise ValueError('multiplicative_persistence() only accepts integral values' )
if num < 0:
raise ValueError('multiplicative_persistence() does not accept negative values' )
A = 0
A = str(lowerCamelCase__ )
while len(lowerCamelCase__ ) != 1:
A = [int(lowerCamelCase__ ) for i in num_string]
A = 1
for i in range(0 , len(lowerCamelCase__ ) ):
total *= numbers[i]
A = str(lowerCamelCase__ )
steps += 1
return steps
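# Additive persistence: the same idea, repeatedly summing the digits instead of
# multiplying them.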
def lowerCAmelCase__ ( lowerCamelCase__ ) -> int:
if not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
raise ValueError('additive_persistence() only accepts integral values' )
if num < 0:
raise ValueError('additive_persistence() does not accept negative values' )
A = 0
A = str(lowerCamelCase__ )
while len(lowerCamelCase__ ) != 1:
A = [int(lowerCamelCase__ ) for i in num_string]
A = 0
for i in range(0 , len(lowerCamelCase__ ) ):
total += numbers[i]
A = str(lowerCamelCase__ )
steps += 1
return steps
if __name__ == "__main__":
import doctest
doctest.testmod()
| 109
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__magic_name__ = {
'''configuration_electra''': ['''ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ElectraConfig''', '''ElectraOnnxConfig'''],
'''tokenization_electra''': ['''ElectraTokenizer'''],
}
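# Each optional backend (tokenizers, torch, TensorFlow, Flax) contributes its
# symbols to the lazy import structure only when the dependency is installed.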
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['''ElectraTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ElectraForCausalLM''',
'''ElectraForMaskedLM''',
'''ElectraForMultipleChoice''',
'''ElectraForPreTraining''',
'''ElectraForQuestionAnswering''',
'''ElectraForSequenceClassification''',
'''ElectraForTokenClassification''',
'''ElectraModel''',
'''ElectraPreTrainedModel''',
'''load_tf_weights_in_electra''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFElectraForMaskedLM''',
'''TFElectraForMultipleChoice''',
'''TFElectraForPreTraining''',
'''TFElectraForQuestionAnswering''',
'''TFElectraForSequenceClassification''',
'''TFElectraForTokenClassification''',
'''TFElectraModel''',
'''TFElectraPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''FlaxElectraForCausalLM''',
'''FlaxElectraForMaskedLM''',
'''FlaxElectraForMultipleChoice''',
'''FlaxElectraForPreTraining''',
'''FlaxElectraForQuestionAnswering''',
'''FlaxElectraForSequenceClassification''',
'''FlaxElectraForTokenClassification''',
'''FlaxElectraModel''',
'''FlaxElectraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 657
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
__magic_name__ = {'''configuration_gpt_neox''': ['''GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoXConfig''']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['''GPTNeoXTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoXForCausalLM''',
'''GPTNeoXForQuestionAnswering''',
'''GPTNeoXForSequenceClassification''',
'''GPTNeoXForTokenClassification''',
'''GPTNeoXLayer''',
'''GPTNeoXModel''',
'''GPTNeoXPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 657
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_lowerCAmelCase : Dict = {
"configuration_blenderbot": [
"BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotConfig",
"BlenderbotOnnxConfig",
],
"tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : List[str] = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Optional[int] = [
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotForCausalLM",
"BlenderbotForConditionalGeneration",
"BlenderbotModel",
"BlenderbotPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Dict = [
"TFBlenderbotForConditionalGeneration",
"TFBlenderbotModel",
"TFBlenderbotPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : List[str] = [
"FlaxBlenderbotForConditionalGeneration",
"FlaxBlenderbotModel",
"FlaxBlenderbotPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
_lowerCAmelCase : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 604
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
_lowerCAmelCase : List[str] = None
_lowerCAmelCase : List[Any] = logging.get_logger(__name__)
_lowerCAmelCase : Tuple = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
_lowerCAmelCase : Union[str, Any] = {
"vocab_file": {
"moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez-orangesum-title": (
"https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json",
"moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json",
"moussaKam/barthez-orangesum-title": (
"https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"
),
},
}
_lowerCAmelCase : Union[str, Any] = {
"moussaKam/mbarthez": 1_0_2_4,
"moussaKam/barthez": 1_0_2_4,
"moussaKam/barthez-orangesum-title": 1_0_2_4,
}
_lowerCAmelCase : int = "▁"
class __snake_case ( SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE__ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE__ = ['input_ids', 'attention_mask']
SCREAMING_SNAKE_CASE__ = BarthezTokenizer
def __init__( self ,a_=None ,a_=None ,a_="<s>" ,a_="</s>" ,a_="</s>" ,a_="<s>" ,a_="<unk>" ,a_="<pad>" ,a_="<mask>" ,**a_ ,):
"""simple docstring"""
# Mask token behave like a normal word, i.e. include the space before it
lowerCAmelCase__ = AddedToken(a_ ,lstrip=a_ ,rstrip=a_ ) if isinstance(a_ ,a_ ) else mask_token
super().__init__(
a_ ,tokenizer_file=a_ ,bos_token=a_ ,eos_token=a_ ,unk_token=a_ ,sep_token=a_ ,cls_token=a_ ,pad_token=a_ ,mask_token=a_ ,**a_ ,)
lowerCAmelCase__ = vocab_file
lowerCAmelCase__ = False if not self.vocab_file else True
def SCREAMING_SNAKE_CASE_ ( self ,a_ ,a_ = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCAmelCase__ = [self.cls_token_id]
lowerCAmelCase__ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE_ ( self ,a_ ,a_ = None ):
"""simple docstring"""
lowerCAmelCase__ = [self.sep_token_id]
lowerCAmelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def SCREAMING_SNAKE_CASE_ ( self ,a_ ,a_ = None ):
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(a_ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
lowerCAmelCase__ = os.path.join(
a_ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a_ ):
copyfile(self.vocab_file ,a_ )
return (out_vocab_file,)
| 604
| 1
|
from collections import defaultdict
from math import ceil, sqrt
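# Count square laminae: an outer_width x outer_width square with a centred
# hole_width x hole_width hole (same parity) uses
# outer_width**2 - hole_width**2 tiles; the answer is how many tile counts can
# be formed in between 1 and 10 distinct ways.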
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ = 1000000 , SCREAMING_SNAKE_CASE__ = 10 ):
snake_case_ = defaultdict(SCREAMING_SNAKE_CASE__ )
for outer_width in range(3 , (t_limit // 4) + 2 ):
if outer_width * outer_width > t_limit:
snake_case_ = max(
ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 )
else:
snake_case_ = 1
hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
for hole_width in range(SCREAMING_SNAKE_CASE__ , outer_width - 1 , 2 ):
count[outer_width * outer_width - hole_width * hole_width] += 1
return sum(1 for n in count.values() if 1 <= n <= 10 )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 39
|
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
a_ = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'encoder.layer_norm_for_extract': 'layer_norm_for_extract',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'label_embs_concat': 'label_embeddings_concat',
'mask_emb': 'masked_spec_embed',
'spk_proj': 'speaker_proj',
}
a_ = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'label_embeddings_concat',
'speaker_proj',
'layer_norm_for_extract',
]
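# Follow the dotted key into the HF model, verify that the fairseq tensor has
# the expected shape, then copy it into the matching weight, bias or norm slot.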
def __lowercase ( lowerCamelCase : Optional[int] , lowerCamelCase : Union[str, Any] , lowerCamelCase : Optional[int] , lowerCamelCase : str , lowerCamelCase : List[str] ):
for attribute in key.split('.' ):
UpperCamelCase_ : Optional[int] = getattr(lowerCamelCase , lowerCamelCase )
if weight_type is not None:
UpperCamelCase_ : str = getattr(lowerCamelCase , lowerCamelCase ).shape
else:
UpperCamelCase_ : Dict = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
F" {value.shape} for {full_name}" )
if weight_type == "weight":
UpperCamelCase_ : Union[str, Any] = value
elif weight_type == "weight_g":
UpperCamelCase_ : List[Any] = value
elif weight_type == "weight_v":
UpperCamelCase_ : Any = value
elif weight_type == "bias":
UpperCamelCase_ : int = value
else:
UpperCamelCase_ : Any = value
logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def __lowercase ( lowerCamelCase : Any , lowerCamelCase : List[Any] ):
UpperCamelCase_ : Union[str, Any] = []
UpperCamelCase_ : Optional[int] = fairseq_model.state_dict()
UpperCamelCase_ : str = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
UpperCamelCase_ : str = False
if "conv_layers" in name:
load_conv_layer(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , hf_model.config.feat_extract_norm == 'group' , )
UpperCamelCase_ : Optional[Any] = True
else:
for key, mapped_key in MAPPING.items():
UpperCamelCase_ : Optional[Any] = 'unispeech_sat.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split('.' )[:-1] ) != key):
# special case since naming is very similar
continue
UpperCamelCase_ : Optional[int] = True
if "*" in mapped_key:
UpperCamelCase_ : Any = name.split(lowerCamelCase )[0].split('.' )[-2]
UpperCamelCase_ : Union[str, Any] = mapped_key.replace('*' , lowerCamelCase )
if "weight_g" in name:
UpperCamelCase_ : Optional[Any] = 'weight_g'
elif "weight_v" in name:
UpperCamelCase_ : List[str] = 'weight_v'
elif "bias" in name:
UpperCamelCase_ : List[Any] = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
UpperCamelCase_ : Tuple = 'weight'
else:
UpperCamelCase_ : Optional[Any] = None
set_recursively(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
continue
if not is_used:
unused_weights.append(lowerCamelCase )
logger.warning(F"Unused weights: {unused_weights}" )
def __lowercase ( lowerCamelCase : int , lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : List[str] ):
UpperCamelCase_ : Optional[Any] = full_name.split('conv_layers.' )[-1]
UpperCamelCase_ : int = name.split('.' )
UpperCamelCase_ : str = int(items[0] )
UpperCamelCase_ : str = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." )
UpperCamelCase_ : Union[str, Any] = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." )
UpperCamelCase_ : List[str] = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found." )
UpperCamelCase_ : str = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found." )
UpperCamelCase_ : List[str] = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(lowerCamelCase )
@torch.no_grad()
def __lowercase ( lowerCamelCase : str , lowerCamelCase : Tuple , lowerCamelCase : Optional[Any]=None , lowerCamelCase : Union[str, Any]=None , lowerCamelCase : List[Any]=True ):
if config_path is not None:
UpperCamelCase_ : Tuple = UniSpeechSatConfig.from_pretrained(lowerCamelCase )
else:
UpperCamelCase_ : List[str] = UniSpeechSatConfig()
UpperCamelCase_ : Union[str, Any] = ''
if is_finetuned:
UpperCamelCase_ : Any = UniSpeechSatForCTC(lowerCamelCase )
else:
UpperCamelCase_ : str = UniSpeechSatForPreTraining(lowerCamelCase )
    UpperCamelCase_, UpperCamelCase_, UpperCamelCase_ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
UpperCamelCase_ : str = model[0].eval()
recursively_load_weights(lowerCamelCase , lowerCamelCase )
hf_wavavec.save_pretrained(lowerCamelCase )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
a_ = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 417
| 0
|
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
a_ : Any = get_tests_dir() + '/test_data/fsmt/fsmt_val_data.json'
with io.open(filename, 'r', encoding='utf-8') as f:
a_ : Any = json.load(f)
@require_torch
class _snake_case ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self , a) -> str:
return FSMTTokenizer.from_pretrained(a)
def SCREAMING_SNAKE_CASE__ ( self , a) -> Any:
SCREAMING_SNAKE_CASE = FSMTForConditionalGeneration.from_pretrained(a).to(a)
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
['en-ru', 26.0],
['ru-en', 22.0],
['en-de', 22.0],
['de-en', 29.0],
])
@slow
def SCREAMING_SNAKE_CASE__ ( self , a , a) -> List[Any]:
# note: this test is not testing the best performance since it only evals a small batch
# but it should be enough to detect a regression in the output quality
SCREAMING_SNAKE_CASE = f'''facebook/wmt19-{pair}'''
SCREAMING_SNAKE_CASE = self.get_tokenizer(a)
SCREAMING_SNAKE_CASE = self.get_model(a)
SCREAMING_SNAKE_CASE = bleu_data[pair]['src']
SCREAMING_SNAKE_CASE = bleu_data[pair]['tgt']
SCREAMING_SNAKE_CASE = tokenizer(a , return_tensors='pt' , truncation=a , padding='longest').to(a)
SCREAMING_SNAKE_CASE = model.generate(
input_ids=batch.input_ids , num_beams=8 , )
SCREAMING_SNAKE_CASE = tokenizer.batch_decode(
a , skip_special_tokens=a , clean_up_tokenization_spaces=a)
SCREAMING_SNAKE_CASE = calculate_bleu(a , a)
print(a)
self.assertGreaterEqual(scores['bleu'] , a)
| 715
|
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
a_ : Optional[Any] = logging.getLogger(__name__)
@dataclass
class _snake_case :
_lowercase : str = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
_lowercase : Optional[str] = field(
default=A__ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
_lowercase : Optional[str] = field(
default=A__ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
_lowercase : Optional[str] = field(
default=A__ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
_lowercase : bool = field(default=A__ , metadata={'''help''': '''Whether tp freeze the encoder.'''} )
_lowercase : bool = field(default=A__ , metadata={'''help''': '''Whether to freeze the embeddings.'''} )
@dataclass
class _snake_case :
_lowercase : str = field(
metadata={'''help''': '''The input data dir. Should contain the .tsv files (or other data files) for the task.'''} )
_lowercase : Optional[str] = field(
default='''summarization''' , metadata={'''help''': '''Task name, summarization (or summarization_{dataset} for pegasus) or translation'''} , )
_lowercase : Optional[int] = field(
default=10_24 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
_lowercase : Optional[int] = field(
default=1_28 , metadata={
'''help''': (
'''The maximum total sequence length for target text after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
_lowercase : Optional[int] = field(
default=1_42 , metadata={
'''help''': (
'''The maximum total sequence length for validation target text after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded. '''
'''This argument is also used to override the ``max_length`` param of ``model.generate``, which is used '''
'''during ``evaluate`` and ``predict``.'''
)
} , )
_lowercase : Optional[int] = field(
default=1_42 , metadata={
'''help''': (
'''The maximum total sequence length for test target text after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
_lowercase : Optional[int] = field(default=-1 , metadata={'''help''': '''# training examples. -1 means use all.'''} )
_lowercase : Optional[int] = field(default=-1 , metadata={'''help''': '''# validation examples. -1 means use all.'''} )
_lowercase : Optional[int] = field(default=-1 , metadata={'''help''': '''# test examples. -1 means use all.'''} )
_lowercase : Optional[str] = field(default=A__ , metadata={'''help''': '''Source language id for translation.'''} )
_lowercase : Optional[str] = field(default=A__ , metadata={'''help''': '''Target language id for translation.'''} )
_lowercase : Optional[int] = field(default=A__ , metadata={'''help''': '''# num_beams to use for evaluation.'''} )
_lowercase : bool = field(
default=A__ , metadata={'''help''': '''If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'''} , )
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
logger.info(F'''***** {split} metrics *****''')
for key in sorted(metrics.keys()):
logger.info(F''' {key} = {metrics[key]}''')
save_json(_UpperCAmelCase , os.path.join(_UpperCAmelCase , F'''{split}_results.json'''))
def lowerCamelCase__ ():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
SCREAMING_SNAKE_CASE = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith('.json'):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = parser.parse_args_into_dataclasses()
check_output_dir(_UpperCAmelCase)
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
logger.info('Training/evaluation parameters %s' , _UpperCAmelCase)
# Set seed
set_seed(training_args.seed)
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
SCREAMING_SNAKE_CASE = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout')
for p in extra_model_params:
if getattr(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
assert hasattr(_UpperCAmelCase , _UpperCAmelCase), F'''({config.__class__.__name__}) doesn\'t have a `{p}` attribute'''
setattr(_UpperCAmelCase , _UpperCAmelCase , getattr(_UpperCAmelCase , _UpperCAmelCase))
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
SCREAMING_SNAKE_CASE = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf='.ckpt' in model_args.model_name_or_path , config=_UpperCAmelCase , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(_UpperCAmelCase , data_args.task)
# set num_beams for evaluation
if data_args.eval_beams is None:
SCREAMING_SNAKE_CASE = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(_UpperCAmelCase , (MBartTokenizer, MBartTokenizerFast)):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(_UpperCAmelCase , _UpperCAmelCase):
SCREAMING_SNAKE_CASE = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_ids(data_args.tgt_lang)
if model_args.freeze_embeds:
freeze_embeds(_UpperCAmelCase)
if model_args.freeze_encoder:
freeze_params(model.get_encoder())
assert_all_frozen(model.get_encoder())
SCREAMING_SNAKE_CASE = SeqaSeqDataset
# Get datasets
SCREAMING_SNAKE_CASE = (
dataset_class(
_UpperCAmelCase , type_path='train' , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '' , )
if training_args.do_train
else None
)
SCREAMING_SNAKE_CASE = (
dataset_class(
_UpperCAmelCase , type_path='val' , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '' , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
SCREAMING_SNAKE_CASE = (
dataset_class(
_UpperCAmelCase , type_path='test' , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '' , )
if training_args.do_predict
else None
)
# Initialize our Trainer
SCREAMING_SNAKE_CASE = (
build_compute_metrics_fn(data_args.task , _UpperCAmelCase) if training_args.predict_with_generate else None
)
SCREAMING_SNAKE_CASE = SeqaSeqTrainer(
model=_UpperCAmelCase , args=_UpperCAmelCase , data_args=_UpperCAmelCase , train_dataset=_UpperCAmelCase , eval_dataset=_UpperCAmelCase , data_collator=SeqaSeqDataCollator(
_UpperCAmelCase , _UpperCAmelCase , model.config.decoder_start_token_id , training_args.tpu_num_cores) , compute_metrics=_UpperCAmelCase , tokenizer=_UpperCAmelCase , )
SCREAMING_SNAKE_CASE = {}
# Training
if training_args.do_train:
logger.info('*** Train ***')
SCREAMING_SNAKE_CASE = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None)
SCREAMING_SNAKE_CASE = train_result.metrics
SCREAMING_SNAKE_CASE = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics('train' , _UpperCAmelCase , training_args.output_dir)
all_metrics.update(_UpperCAmelCase)
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , 'trainer_state.json'))
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir)
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***')
SCREAMING_SNAKE_CASE = trainer.evaluate(metric_key_prefix='val')
SCREAMING_SNAKE_CASE = data_args.n_val
SCREAMING_SNAKE_CASE = round(metrics['val_loss'] , 4)
if trainer.is_world_process_zero():
handle_metrics('val' , _UpperCAmelCase , training_args.output_dir)
all_metrics.update(_UpperCAmelCase)
if training_args.do_predict:
logger.info('*** Predict ***')
SCREAMING_SNAKE_CASE = trainer.predict(test_dataset=_UpperCAmelCase , metric_key_prefix='test')
SCREAMING_SNAKE_CASE = test_output.metrics
SCREAMING_SNAKE_CASE = data_args.n_test
if trainer.is_world_process_zero():
SCREAMING_SNAKE_CASE = round(metrics['test_loss'] , 4)
handle_metrics('test' , _UpperCAmelCase , training_args.output_dir)
all_metrics.update(_UpperCAmelCase)
if training_args.predict_with_generate:
SCREAMING_SNAKE_CASE = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase)
SCREAMING_SNAKE_CASE = lmap(str.strip , _UpperCAmelCase)
write_txt_file(_UpperCAmelCase , os.path.join(training_args.output_dir , 'test_generations.txt'))
if trainer.is_world_process_zero():
save_json(_UpperCAmelCase , os.path.join(training_args.output_dir , 'all_results.json'))
return all_metrics
def lowerCamelCase__ (_UpperCAmelCase):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 444
| 0
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCamelCase : Optional[int] = {'''configuration_timm_backbone''': ['''TimmBackboneConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Tuple = ['''TimmBackbone''']
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
__UpperCamelCase : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 4
|
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class Speech2TextProcessor(ProcessorMixin):
    '''simple docstring'''
    feature_extractor_class = 'Speech2TextFeatureExtractor'
    tokenizer_class = 'Speech2TextTokenizer'
    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        if "raw_speech" in kwargs:
            warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.')
            audio = kwargs.pop('raw_speech')
        else:
            audio = kwargs.pop('audio', None)
        sampling_rate = kwargs.pop('sampling_rate', None)
        text = kwargs.pop('text', None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError('You need to specify either an `audio` or `text` input to process.')
        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs['labels'] = encodings['input_ids']
            return inputs
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            '`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
            'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
            'your audio inputs, or in a separate call).' )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
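# Hedged usage sketch for the processor above (checkpoint name and waveform
# are assumptions): audio and labels can go through a single call, as the
# deprecation warning recommends.
# from transformers import Speech2TextProcessor
# processor = Speech2TextProcessor.from_pretrained('facebook/s2t-small-librispeech-asr')
# batch = processor(audio=waveform, sampling_rate=16_000, text='a transcript')
# batch['labels']  # tokenized transcript ids, alongside batch['input_features']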
| 220
| 0
|
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
lowercase : List[str] = """\
Text data.
Second line of data."""
lowercase : str = """file"""
@pytest.fixture(scope='''session''' )
def zstd_path(tmp_path_factory):
 path = tmp_path_factory.mktemp('''data''') / (FILE_PATH + """.zstd""")
 data = bytes(FILE_CONTENT, '''utf-8''')
 with zstd.open(path, '''wb''') as f:
 f.write(data)
 return path
@pytest.fixture
def tmpfs_file(tmpfs):
 with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), '''w''') as f:
 f.write(FILE_CONTENT)
 return FILE_PATH
@pytest.mark.parametrize('''compression_format''' , ['''gzip''', '''xz''', '''zstd'''] )
def SCREAMING_SNAKE_CASE ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
_UpperCamelCase = {"""gzip""": gz_file, """xz""": xz_file, """zstd""": zstd_path}
_UpperCamelCase = input_paths[compression_format]
_UpperCamelCase = tmp_path / """cache"""
_UpperCamelCase = DownloadConfig(cache_dir=lowerCAmelCase , extract_compressed_file=lowerCAmelCase )
_UpperCamelCase = cached_path(lowerCAmelCase , download_config=lowerCAmelCase )
with open(lowerCAmelCase ) as f:
_UpperCamelCase = f.read()
with open(lowerCAmelCase ) as f:
_UpperCamelCase = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize('''default_extracted''' , [True, False] )
@pytest.mark.parametrize('''default_cache_dir''' , [True, False] )
def SCREAMING_SNAKE_CASE ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
_UpperCamelCase = """custom_cache"""
_UpperCamelCase = """custom_extracted_dir"""
_UpperCamelCase = tmp_path / """custom_extracted_path"""
if default_extracted:
_UpperCamelCase = ("""downloads""" if default_cache_dir else custom_cache_dir, """extracted""")
else:
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_DIR''' , lowerCAmelCase )
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(lowerCAmelCase ) )
_UpperCamelCase = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
_UpperCamelCase = xz_file
_UpperCamelCase = (
DownloadConfig(extract_compressed_file=lowerCAmelCase )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=lowerCAmelCase )
)
_UpperCamelCase = cached_path(lowerCAmelCase , download_config=lowerCAmelCase )
assert Path(lowerCAmelCase ).parent.parts[-2:] == expected
def SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
# absolute path
_UpperCamelCase = str(Path(lowerCAmelCase ).resolve() )
assert cached_path(lowerCAmelCase ) == text_file
# relative path
_UpperCamelCase = str(Path(lowerCAmelCase ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(lowerCAmelCase ) == text_file
def SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
# absolute path
_UpperCamelCase = str(tmp_path.resolve() / '''__missing_file__.txt''' )
with pytest.raises(lowerCAmelCase ):
cached_path(lowerCAmelCase )
# relative path
_UpperCamelCase = """./__missing_file__.txt"""
with pytest.raises(lowerCAmelCase ):
cached_path(lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
_UpperCamelCase = get_from_cache(f'''tmp://{tmpfs_file}''' )
with open(lowerCAmelCase ) as f:
_UpperCamelCase = f.read()
assert output_file_content == FILE_CONTENT
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( ):
with pytest.raises(lowerCAmelCase ):
cached_path('''https://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
_UpperCamelCase = tmp_path_factory.mktemp('''data''' ) / """file.html"""
with pytest.raises(lowerCAmelCase ):
http_get('''https://huggingface.co''' , temp_file=lowerCAmelCase )
with pytest.raises(lowerCAmelCase ):
http_head('''https://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
_UpperCamelCase = tmp_path_factory.mktemp('''data''' ) / """file.html"""
with pytest.raises(lowerCAmelCase ):
ftp_get('''ftp://huggingface.co''' , temp_file=lowerCAmelCase )
with pytest.raises(lowerCAmelCase ):
ftp_head('''ftp://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
_UpperCamelCase = tmp_path_factory.mktemp('''data''' ) / """file.html"""
with pytest.raises(lowerCAmelCase ):
fsspec_get('''s3://huggingface.co''' , temp_file=lowerCAmelCase )
with pytest.raises(lowerCAmelCase ):
fsspec_head('''s3://huggingface.co''' )
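# Note on the three offline tests above: patching datasets.config.HF_DATASETS_OFFLINE
# mirrors what users get by exporting HF_DATASETS_OFFLINE=1 in the shell; every
# remote fetch (http/ftp/fsspec) then raises OfflineModeIsEnabled promptly
# instead of waiting on the network.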
| 714
|
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
lowercase : str = logging.get_logger(__name__)
class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs):
        """simple docstring"""
        warnings.warn(
            '''The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use CLIPImageProcessor instead.''', FutureWarning, )
        super().__init__(*args, **kwargs)
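# Migration sketch implied by the warning above (checkpoint name is an
# assumption): load the replacement class directly.
# from transformers import CLIPImageProcessor
# image_processor = CLIPImageProcessor.from_pretrained('openai/clip-vit-base-patch32')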
| 105
| 0
|
import numpy as np
def tangent_hyperbolic(vector: np.ndarray) -> np.ndarray:
 return (2 / (1 + np.exp(-2 * vector ))) - 1
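# Identity check (a quick sketch): tanh(x) = 2*sigmoid(2x) - 1, so the closed
# form above must agree with np.tanh elementwise.
x = np.linspace(-3.0, 3.0, 7)
assert np.allclose(tangent_hyperbolic(x), np.tanh(x))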
if __name__ == "__main__":
import doctest
doctest.testmod()
| 234
|
from ..utils import DummyObject, requires_backends
class __magic_name__ ( metaclass=__UpperCAmelCase):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = ["flax"]
def __init__( self: Dict , *_lowerCamelCase: Tuple , **_lowerCamelCase: List[str] ):
requires_backends(self , ['''flax'''] )
@classmethod
def _A ( cls: Dict , *_lowerCamelCase: Optional[Any] , **_lowerCamelCase: List[Any] ):
requires_backends(cls , ['''flax'''] )
@classmethod
def _A ( cls: Tuple , *_lowerCamelCase: Tuple , **_lowerCamelCase: Optional[int] ):
requires_backends(cls , ['''flax'''] )
class __magic_name__ ( metaclass=__UpperCAmelCase):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = ["flax"]
def __init__( self: Union[str, Any] , *_lowerCamelCase: Any , **_lowerCamelCase: Dict ):
requires_backends(self , ['''flax'''] )
@classmethod
def _A ( cls: Union[str, Any] , *_lowerCamelCase: Any , **_lowerCamelCase: Tuple ):
requires_backends(cls , ['''flax'''] )
@classmethod
def _A ( cls: int , *_lowerCamelCase: List[Any] , **_lowerCamelCase: Optional[int] ):
requires_backends(cls , ['''flax'''] )
class __magic_name__ ( metaclass=__UpperCAmelCase):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = ["flax"]
def __init__( self: int , *_lowerCamelCase: Tuple , **_lowerCamelCase: List[Any] ):
requires_backends(self , ['''flax'''] )
@classmethod
def _A ( cls: Optional[Any] , *_lowerCamelCase: Optional[Any] , **_lowerCamelCase: List[str] ):
requires_backends(cls , ['''flax'''] )
@classmethod
def _A ( cls: Optional[int] , *_lowerCamelCase: str , **_lowerCamelCase: int ):
requires_backends(cls , ['''flax'''] )
class __magic_name__ ( metaclass=__UpperCAmelCase):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ["flax"]
def __init__( self: int , *_lowerCamelCase: Dict , **_lowerCamelCase: Dict ):
requires_backends(self , ['''flax'''] )
@classmethod
def _A ( cls: Optional[int] , *_lowerCamelCase: Any , **_lowerCamelCase: Optional[int] ):
requires_backends(cls , ['''flax'''] )
@classmethod
def _A ( cls: Any , *_lowerCamelCase: Optional[int] , **_lowerCamelCase: Union[str, Any] ):
requires_backends(cls , ['''flax'''] )
class __magic_name__ ( metaclass=__UpperCAmelCase):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = ["flax"]
def __init__( self: Tuple , *_lowerCamelCase: List[str] , **_lowerCamelCase: Union[str, Any] ):
requires_backends(self , ['''flax'''] )
@classmethod
def _A ( cls: str , *_lowerCamelCase: Optional[Any] , **_lowerCamelCase: Optional[Any] ):
requires_backends(cls , ['''flax'''] )
@classmethod
def _A ( cls: Any , *_lowerCamelCase: str , **_lowerCamelCase: List[Any] ):
requires_backends(cls , ['''flax'''] )
class __magic_name__ ( metaclass=__UpperCAmelCase):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = ["flax"]
def __init__( self: Dict , *_lowerCamelCase: str , **_lowerCamelCase: List[Any] ):
requires_backends(self , ['''flax'''] )
@classmethod
def _A ( cls: Tuple , *_lowerCamelCase: List[Any] , **_lowerCamelCase: Optional[Any] ):
requires_backends(cls , ['''flax'''] )
@classmethod
def _A ( cls: Union[str, Any] , *_lowerCamelCase: Union[str, Any] , **_lowerCamelCase: Any ):
requires_backends(cls , ['''flax'''] )
class __magic_name__ ( metaclass=__UpperCAmelCase):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = ["flax"]
def __init__( self: str , *_lowerCamelCase: List[str] , **_lowerCamelCase: List[Any] ):
requires_backends(self , ['''flax'''] )
@classmethod
def _A ( cls: List[Any] , *_lowerCamelCase: Dict , **_lowerCamelCase: Optional[int] ):
requires_backends(cls , ['''flax'''] )
@classmethod
def _A ( cls: int , *_lowerCamelCase: Dict , **_lowerCamelCase: List[str] ):
requires_backends(cls , ['''flax'''] )
class __magic_name__ ( metaclass=__UpperCAmelCase):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = ["flax"]
def __init__( self: Dict , *_lowerCamelCase: Tuple , **_lowerCamelCase: int ):
requires_backends(self , ['''flax'''] )
@classmethod
def _A ( cls: List[Any] , *_lowerCamelCase: Dict , **_lowerCamelCase: Tuple ):
requires_backends(cls , ['''flax'''] )
@classmethod
def _A ( cls: Optional[int] , *_lowerCamelCase: Union[str, Any] , **_lowerCamelCase: Optional[Any] ):
requires_backends(cls , ['''flax'''] )
class __magic_name__ ( metaclass=__UpperCAmelCase):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = ["flax"]
def __init__( self: int , *_lowerCamelCase: List[Any] , **_lowerCamelCase: Optional[int] ):
requires_backends(self , ['''flax'''] )
@classmethod
def _A ( cls: Union[str, Any] , *_lowerCamelCase: Optional[Any] , **_lowerCamelCase: Optional[int] ):
requires_backends(cls , ['''flax'''] )
@classmethod
def _A ( cls: Optional[Any] , *_lowerCamelCase: Any , **_lowerCamelCase: int ):
requires_backends(cls , ['''flax'''] )
class __magic_name__ ( metaclass=__UpperCAmelCase):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = ["flax"]
def __init__( self: Optional[Any] , *_lowerCamelCase: Union[str, Any] , **_lowerCamelCase: Any ):
requires_backends(self , ['''flax'''] )
@classmethod
def _A ( cls: Optional[Any] , *_lowerCamelCase: Union[str, Any] , **_lowerCamelCase: Tuple ):
requires_backends(cls , ['''flax'''] )
@classmethod
def _A ( cls: Union[str, Any] , *_lowerCamelCase: Tuple , **_lowerCamelCase: str ):
requires_backends(cls , ['''flax'''] )
class __magic_name__ ( metaclass=__UpperCAmelCase):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ["flax"]
def __init__( self: str , *_lowerCamelCase: Any , **_lowerCamelCase: int ):
requires_backends(self , ['''flax'''] )
@classmethod
def _A ( cls: List[Any] , *_lowerCamelCase: Optional[int] , **_lowerCamelCase: Tuple ):
requires_backends(cls , ['''flax'''] )
@classmethod
def _A ( cls: Dict , *_lowerCamelCase: str , **_lowerCamelCase: int ):
requires_backends(cls , ['''flax'''] )
class __magic_name__ ( metaclass=__UpperCAmelCase):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = ["flax"]
def __init__( self: Any , *_lowerCamelCase: Union[str, Any] , **_lowerCamelCase: Any ):
requires_backends(self , ['''flax'''] )
@classmethod
def _A ( cls: Tuple , *_lowerCamelCase: Union[str, Any] , **_lowerCamelCase: Union[str, Any] ):
requires_backends(cls , ['''flax'''] )
@classmethod
def _A ( cls: Dict , *_lowerCamelCase: List[Any] , **_lowerCamelCase: Optional[Any] ):
requires_backends(cls , ['''flax'''] )
class __magic_name__ ( metaclass=__UpperCAmelCase):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = ["flax"]
def __init__( self: Tuple , *_lowerCamelCase: List[Any] , **_lowerCamelCase: Optional[Any] ):
requires_backends(self , ['''flax'''] )
@classmethod
def _A ( cls: Any , *_lowerCamelCase: List[str] , **_lowerCamelCase: Optional[int] ):
requires_backends(cls , ['''flax'''] )
@classmethod
def _A ( cls: int , *_lowerCamelCase: Tuple , **_lowerCamelCase: Optional[int] ):
requires_backends(cls , ['''flax'''] )
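# Self-contained sketch (names illustrative) of the pattern repeated above:
# a placeholder class whose instantiation fails with an actionable message
# instead of an ImportError deep inside the library.
class _RequiresFlax(type):
    def __call__(cls, *args, **kwargs):
        raise ImportError(f"{cls.__name__} requires the 'flax' backend; try `pip install flax`.")

class FlaxPipelinePlaceholder(metaclass=_RequiresFlax):
    pass

# FlaxPipelinePlaceholder()  # -> ImportError with install instructions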
| 234
| 1
|
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
_lowerCamelCase = logging.get_logger(__name__)
_lowerCamelCase = OrderedDict(
[
('''align''', '''EfficientNetImageProcessor'''),
('''beit''', '''BeitImageProcessor'''),
('''bit''', '''BitImageProcessor'''),
('''blip''', '''BlipImageProcessor'''),
('''blip-2''', '''BlipImageProcessor'''),
('''bridgetower''', '''BridgeTowerImageProcessor'''),
('''chinese_clip''', '''ChineseCLIPImageProcessor'''),
('''clip''', '''CLIPImageProcessor'''),
('''clipseg''', '''ViTImageProcessor'''),
('''conditional_detr''', '''ConditionalDetrImageProcessor'''),
('''convnext''', '''ConvNextImageProcessor'''),
('''convnextv2''', '''ConvNextImageProcessor'''),
('''cvt''', '''ConvNextImageProcessor'''),
('''data2vec-vision''', '''BeitImageProcessor'''),
('''deformable_detr''', '''DeformableDetrImageProcessor'''),
('''deit''', '''DeiTImageProcessor'''),
('''deta''', '''DetaImageProcessor'''),
('''detr''', '''DetrImageProcessor'''),
('''dinat''', '''ViTImageProcessor'''),
('''donut-swin''', '''DonutImageProcessor'''),
('''dpt''', '''DPTImageProcessor'''),
('''efficientformer''', '''EfficientFormerImageProcessor'''),
('''efficientnet''', '''EfficientNetImageProcessor'''),
('''flava''', '''FlavaImageProcessor'''),
('''focalnet''', '''BitImageProcessor'''),
('''git''', '''CLIPImageProcessor'''),
('''glpn''', '''GLPNImageProcessor'''),
('''groupvit''', '''CLIPImageProcessor'''),
('''imagegpt''', '''ImageGPTImageProcessor'''),
('''instructblip''', '''BlipImageProcessor'''),
('''layoutlmv2''', '''LayoutLMv2ImageProcessor'''),
('''layoutlmv3''', '''LayoutLMv3ImageProcessor'''),
('''levit''', '''LevitImageProcessor'''),
('''mask2former''', '''Mask2FormerImageProcessor'''),
('''maskformer''', '''MaskFormerImageProcessor'''),
('''mgp-str''', '''ViTImageProcessor'''),
('''mobilenet_v1''', '''MobileNetV1ImageProcessor'''),
('''mobilenet_v2''', '''MobileNetV2ImageProcessor'''),
 ('''mobilevit''', '''MobileViTImageProcessor'''),
('''mobilevitv2''', '''MobileViTImageProcessor'''),
('''nat''', '''ViTImageProcessor'''),
('''oneformer''', '''OneFormerImageProcessor'''),
('''owlvit''', '''OwlViTImageProcessor'''),
('''perceiver''', '''PerceiverImageProcessor'''),
('''pix2struct''', '''Pix2StructImageProcessor'''),
('''poolformer''', '''PoolFormerImageProcessor'''),
('''regnet''', '''ConvNextImageProcessor'''),
('''resnet''', '''ConvNextImageProcessor'''),
('''sam''', '''SamImageProcessor'''),
('''segformer''', '''SegformerImageProcessor'''),
('''swiftformer''', '''ViTImageProcessor'''),
('''swin''', '''ViTImageProcessor'''),
('''swin2sr''', '''Swin2SRImageProcessor'''),
('''swinv2''', '''ViTImageProcessor'''),
('''table-transformer''', '''DetrImageProcessor'''),
('''timesformer''', '''VideoMAEImageProcessor'''),
('''tvlt''', '''TvltImageProcessor'''),
('''upernet''', '''SegformerImageProcessor'''),
('''van''', '''ConvNextImageProcessor'''),
('''videomae''', '''VideoMAEImageProcessor'''),
('''vilt''', '''ViltImageProcessor'''),
('''vit''', '''ViTImageProcessor'''),
('''vit_hybrid''', '''ViTHybridImageProcessor'''),
('''vit_mae''', '''ViTImageProcessor'''),
('''vit_msn''', '''ViTImageProcessor'''),
('''xclip''', '''CLIPImageProcessor'''),
('''yolos''', '''YolosImageProcessor'''),
]
)
_lowerCamelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name(class_name: str):
 """simple docstring"""
 for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
 if class_name in extractors:
 module_name =model_type_to_module_name(module_name )
 module =importlib.import_module(F'''.{module_name}''' , """transformers.models""" )
 try:
 return getattr(module , class_name )
 except AttributeError:
 continue
 for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
 if getattr(extractor , """__name__""" , None ) == class_name:
 return extractor
 # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
 # init and we return the proper dummy to get an appropriate error message.
 main_module =importlib.import_module("""transformers""" )
 if hasattr(main_module , class_name ):
 return getattr(main_module , class_name )
 return None
def get_image_processor_config(pretrained_model_name_or_path: Union[str, os.PathLike] , cache_dir: Optional[Union[str, os.PathLike]] = None , force_download: bool = False , resume_download: bool = False , proxies: Optional[Dict[str, str]] = None , use_auth_token: Optional[Union[bool, str]] = None , revision: Optional[str] = None , local_files_only: bool = False , **kwargs , )-> dict:
 """simple docstring"""
 resolved_config_file =get_file_from_repo(
 pretrained_model_name_or_path , IMAGE_PROCESSOR_NAME , cache_dir=cache_dir , force_download=force_download , resume_download=resume_download , proxies=proxies , use_auth_token=use_auth_token , revision=revision , local_files_only=local_files_only , )
 if resolved_config_file is None:
 logger.info(
 """Could not locate the image processor configuration file, will try to use the model config instead.""" )
 return {}
 with open(resolved_config_file , encoding="""utf-8""" ) as reader:
 return json.load(reader )
class UpperCAmelCase__ :
'''simple docstring'''
def __init__( self ):
raise EnvironmentError(
"""AutoImageProcessor is designed to be instantiated """
"""using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.""" )
@classmethod
@replace_list_option_in_docstrings(_lowerCAmelCase )
def lowerCAmelCase__ ( cls , _lowerCAmelCase , **_lowerCAmelCase ):
a =kwargs.pop("""config""" , _lowerCAmelCase )
a =kwargs.pop("""trust_remote_code""" , _lowerCAmelCase )
a =True
a , a =ImageProcessingMixin.get_image_processor_dict(_lowerCAmelCase , **_lowerCAmelCase )
a =config_dict.get("""image_processor_type""" , _lowerCAmelCase )
a =None
if "AutoImageProcessor" in config_dict.get("""auto_map""" , {} ):
a =config_dict["""auto_map"""]["""AutoImageProcessor"""]
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
if image_processor_class is None and image_processor_auto_map is None:
a =config_dict.pop("""feature_extractor_type""" , _lowerCAmelCase )
if feature_extractor_class is not None:
logger.warning(
"""Could not find image processor class in the image processor config or the model config. Loading"""
""" based on pattern matching with the model's feature extractor configuration.""" )
a =feature_extractor_class.replace("""FeatureExtractor""" , """ImageProcessor""" )
if "AutoFeatureExtractor" in config_dict.get("""auto_map""" , {} ):
a =config_dict["""auto_map"""]["""AutoFeatureExtractor"""]
a =feature_extractor_auto_map.replace("""FeatureExtractor""" , """ImageProcessor""" )
logger.warning(
"""Could not find image processor auto map in the image processor config or the model config."""
""" Loading based on pattern matching with the model's feature extractor configuration.""" )
# If we don't find the image processor class in the image processor config, let's try the model config.
if image_processor_class is None and image_processor_auto_map is None:
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
a =AutoConfig.from_pretrained(_lowerCAmelCase , **_lowerCAmelCase )
# It could be in `config.image_processor_type``
a =getattr(_lowerCAmelCase , """image_processor_type""" , _lowerCAmelCase )
if hasattr(_lowerCAmelCase , """auto_map""" ) and "AutoImageProcessor" in config.auto_map:
a =config.auto_map["""AutoImageProcessor"""]
if image_processor_class is not None:
a =image_processor_class_from_name(_lowerCAmelCase )
a =image_processor_auto_map is not None
a =image_processor_class is not None or type(_lowerCAmelCase ) in IMAGE_PROCESSOR_MAPPING
a =resolve_trust_remote_code(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if has_remote_code and trust_remote_code:
a =get_class_from_dynamic_module(
_lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase )
a =kwargs.pop("""code_revision""" , _lowerCAmelCase )
if os.path.isdir(_lowerCAmelCase ):
image_processor_class.register_for_auto_class()
return image_processor_class.from_dict(_lowerCAmelCase , **_lowerCAmelCase )
elif image_processor_class is not None:
return image_processor_class.from_dict(_lowerCAmelCase , **_lowerCAmelCase )
# Last try: we use the IMAGE_PROCESSOR_MAPPING.
elif type(_lowerCAmelCase ) in IMAGE_PROCESSOR_MAPPING:
a =IMAGE_PROCESSOR_MAPPING[type(_lowerCAmelCase )]
return image_processor_class.from_dict(_lowerCAmelCase , **_lowerCAmelCase )
raise ValueError(
F'''Unrecognized image processor in {pretrained_model_name_or_path}. Should have a '''
F'''`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following '''
F'''`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}''' )
@staticmethod
def lowerCAmelCase__ ( _lowerCAmelCase , _lowerCAmelCase ):
IMAGE_PROCESSOR_MAPPING.register(_lowerCAmelCase , _lowerCAmelCase )
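# Hedged usage sketch (checkpoint is an assumption): from_pretrained resolves
# the concrete class through the model_type -> image-processor mapping above.
# from transformers import AutoImageProcessor
# image_processor = AutoImageProcessor.from_pretrained('google/vit-base-patch16-224')
# type(image_processor).__name__  # expected: 'ViTImageProcessor'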
| 321
|
LETTERS = '''ABCDEFGHIJKLMNOPQRSTUVWXYZ'''
def main()-> None:
 """simple docstring"""
 message =input("""Enter message: """ )
 key =input("""Enter key [alphanumeric]: """ )
 mode =input("""Encrypt/Decrypt [e/d]: """ )
 if mode.lower().startswith("""e""" ):
 mode ="""encrypt"""
 translated =encrypt_message(key , message )
 elif mode.lower().startswith("""d""" ):
 mode ="""decrypt"""
 translated =decrypt_message(key , message )
 print(F'''\n{mode.title()}ed message:''' )
 print(translated )
def encrypt_message(key: str , message: str )-> str:
 """simple docstring"""
 return translate_message(key , message , """encrypt""" )
def decrypt_message(key: str , message: str )-> str:
 """simple docstring"""
 return translate_message(key , message , """decrypt""" )
def translate_message(key: str , message: str , mode: str )-> str:
 """simple docstring"""
 translated =[]
 key_index =0
 key =key.upper()
 for symbol in message:
 num =LETTERS.find(symbol.upper() )
 if num != -1:
 if mode == "encrypt":
 num += LETTERS.find(key[key_index] )
 elif mode == "decrypt":
 num -= LETTERS.find(key[key_index] )
 num %= len(LETTERS )
 if symbol.isupper():
 translated.append(LETTERS[num] )
 elif symbol.islower():
 translated.append(LETTERS[num].lower() )
 key_index += 1
 if key_index == len(key ):
 key_index =0
 else:
 translated.append(symbol )
 return "".join(translated )
if __name__ == "__main__":
main()
| 321
| 1
|
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class _UpperCAmelCase ( A__ ):
def _snake_case ( self : List[Any]):
SCREAMING_SNAKE_CASE_ :Any = pa.array(TypedSequence([1, 2, 3]))
self.assertEqual(arr.type , pa.intaa())
def _snake_case ( self : Optional[Any]):
with self.assertRaises(_snake_case):
SCREAMING_SNAKE_CASE_ :List[Any] = pa.array(TypedSequence([1, 2, 3]) , type=pa.intaa())
def _snake_case ( self : Union[str, Any]):
with self.assertRaises(_snake_case):
SCREAMING_SNAKE_CASE_ :List[str] = pa.array(TypedSequence([1, 2, 3] , try_type=Value("bool") , type=Value("int64")))
def _snake_case ( self : Tuple):
SCREAMING_SNAKE_CASE_ :List[Any] = pa.array(TypedSequence([1, 2, 3] , type=Value("int32")))
self.assertEqual(arr.type , pa.intaa())
def _snake_case ( self : Optional[Any]):
with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
SCREAMING_SNAKE_CASE_ :List[str] = pa.array(TypedSequence(["foo", "bar"] , type=Value("int64")))
def _snake_case ( self : Tuple):
SCREAMING_SNAKE_CASE_ :Tuple = pa.array(TypedSequence([1, 2, 3] , try_type=Value("int32")))
self.assertEqual(arr.type , pa.intaa())
def _snake_case ( self : Dict):
SCREAMING_SNAKE_CASE_ :str = pa.array(TypedSequence(["foo", "bar"] , try_type=Value("int64")))
self.assertEqual(arr.type , pa.string())
def _snake_case ( self : str):
SCREAMING_SNAKE_CASE_ :str = pa.array(TypedSequence([[[1, 2, 3]]] , type=ArrayaD((1, 3) , "int64")))
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , "int64"))
def _snake_case ( self : Any):
with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
SCREAMING_SNAKE_CASE_ :Tuple = pa.array(TypedSequence(["foo", "bar"] , type=ArrayaD((1, 3) , "int64")))
def _snake_case ( self : Union[str, Any]):
SCREAMING_SNAKE_CASE_ :Dict = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=ArrayaD((1, 3) , "int64")))
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , "int64"))
def _snake_case ( self : int):
SCREAMING_SNAKE_CASE_ :Union[str, Any] = pa.array(TypedSequence(["foo", "bar"] , try_type=ArrayaD((1, 3) , "int64")))
self.assertEqual(arr.type , pa.string())
@require_pil
def _snake_case ( self : Union[str, Any]):
import PIL.Image
SCREAMING_SNAKE_CASE_ :int = PIL.Image.fromarray(np.arange(10 , dtype=np.uinta).reshape(2 , 5))
with patch(
"datasets.arrow_writer.cast_to_python_objects" , side_effect=_snake_case) as mock_cast_to_python_objects:
SCREAMING_SNAKE_CASE_ :Dict = pa.array(TypedSequence([{"path": None, "bytes": B"image_bytes"}, pil_image] , type=Image()))
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ :str = mock_cast_to_python_objects.call_args_list[-1]
self.assertIn("optimize_list_casting" , _snake_case)
self.assertFalse(kwargs["optimize_list_casting"])
def _check_output( output , expected_num_chunks ):
 '''simple docstring'''
 stream = pa.BufferReader(output ) if isinstance(output , pa.Buffer ) else pa.memory_map(output )
 f = pa.ipc.open_stream(stream )
 pa_table = f.read_all()
 assert len(pa_table.to_batches() ) == expected_num_chunks
 assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
 del pa_table
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def lowercase ( a , a ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ :Optional[int] = pa.BufferOutputStream()
SCREAMING_SNAKE_CASE_ :Tuple = pa.schema(A__ ) if fields else None
with ArrowWriter(stream=A__ , schema=A__ , writer_batch_size=A__ ) as writer:
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ :Any = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
SCREAMING_SNAKE_CASE_ :Any = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(A__ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def lowercase ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ :Union[str, Any] = pa.BufferOutputStream()
SCREAMING_SNAKE_CASE_ :Dict = Features({"labels": ClassLabel(names=["neg", "pos"] )} )
with ArrowWriter(stream=A__ , features=A__ ) as writer:
writer.write({"labels": 0} )
writer.write({"labels": 1} )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ :int = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == features.arrow_schema
assert writer._schema.metadata == features.arrow_schema.metadata
SCREAMING_SNAKE_CASE_ :Any = pa.BufferReader(output.getvalue() )
SCREAMING_SNAKE_CASE_ :Tuple = pa.ipc.open_stream(A__ )
SCREAMING_SNAKE_CASE_ :Tuple = f.read_all()
SCREAMING_SNAKE_CASE_ :Dict = pa_table.schema
assert pa_table.num_rows == 2
assert schema == features.arrow_schema
assert schema.metadata == features.arrow_schema.metadata
assert features == Features.from_arrow_schema(A__ )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
def lowercase ( a ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ :List[str] = pa.BufferOutputStream()
with ArrowWriter(
stream=A__ , writer_batch_size=A__ , hash_salt="split_name" , check_duplicates=A__ , ) as writer:
with pytest.raises(A__ ):
writer.write({"col_1": "foo", "col_2": 1} , key=[1, 2] )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ :Dict = writer.finalize()
@pytest.mark.parametrize("writer_batch_size" , [None, 2, 10] )
def lowercase ( a ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ :List[Any] = pa.BufferOutputStream()
with ArrowWriter(
stream=A__ , writer_batch_size=A__ , hash_salt="split_name" , check_duplicates=A__ , ) as writer:
with pytest.raises(A__ ):
writer.write({"col_1": "foo", "col_2": 1} , key=10 )
writer.write({"col_1": "bar", "col_2": 2} , key=10 )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ :Any = writer.finalize()
@pytest.mark.parametrize("writer_batch_size" , [None, 2, 10] )
def lowercase ( a ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ :str = pa.BufferOutputStream()
with ArrowWriter(
stream=A__ , writer_batch_size=A__ , hash_salt="split_name" , check_duplicates=A__ , ) as writer:
writer.write({"col_1": "foo", "col_2": 1} , key=1 )
writer.write({"col_1": "bar", "col_2": 2} , key=2 )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ :List[Any] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def lowercase ( a , a ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ :Tuple = pa.BufferOutputStream()
SCREAMING_SNAKE_CASE_ :int = pa.schema(A__ ) if fields else None
with ArrowWriter(stream=A__ , schema=A__ , writer_batch_size=A__ ) as writer:
writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]} )
writer.write_batch({"col_1": [], "col_2": []} )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ :List[Any] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
SCREAMING_SNAKE_CASE_ :int = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(A__ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def lowercase ( a , a ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ :Any = pa.BufferOutputStream()
SCREAMING_SNAKE_CASE_ :Dict = pa.schema(A__ ) if fields else None
with ArrowWriter(stream=A__ , schema=A__ , writer_batch_size=A__ ) as writer:
writer.write_table(pa.Table.from_pydict({"col_1": ["foo", "bar"], "col_2": [1, 2]} ) )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ :Optional[int] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
SCREAMING_SNAKE_CASE_ :Optional[int] = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(A__ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def lowercase ( a , a ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ :Dict = pa.BufferOutputStream()
SCREAMING_SNAKE_CASE_ :List[Any] = pa.schema(A__ ) if fields else None
with ArrowWriter(stream=A__ , schema=A__ , writer_batch_size=A__ ) as writer:
writer.write_row(pa.Table.from_pydict({"col_1": ["foo"], "col_2": [1]} ) )
writer.write_row(pa.Table.from_pydict({"col_1": ["bar"], "col_2": [2]} ) )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ :Dict = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
SCREAMING_SNAKE_CASE_ :str = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(A__ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def lowercase ( ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE_ :Tuple = {"col_1": pa.string(), "col_2": pa.intaa()}
SCREAMING_SNAKE_CASE_ :Tuple = os.path.join(A__ , "test.arrow" )
with ArrowWriter(path=A__ , schema=pa.schema(A__ ) ) as writer:
writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]} )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ :Optional[int] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == pa.schema(A__ , metadata=writer._schema.metadata )
_check_output(A__ , 1 )
def lowercase ( a ):
'''simple docstring'''
if pa.types.is_list(A__ ):
return get_base_dtype(arr_type.value_type )
else:
return arr_type
def lowercase ( a , a ):
'''simple docstring'''
if isinstance(lst[0] , A__ ):
change_first_primitive_element_in_list(lst[0] , A__ )
else:
SCREAMING_SNAKE_CASE_ :Dict = value
@pytest.mark.parametrize("optimized_int_type, expected_dtype" , [(None, pa.intaa()), (Value("int32" ), pa.intaa())] )
@pytest.mark.parametrize("sequence" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def lowercase ( a , a , a ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ :List[Any] = pa.array(TypedSequence(A__ , optimized_int_type=A__ ) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
"col, expected_dtype" , [
("attention_mask", pa.inta()),
("special_tokens_mask", pa.inta()),
("token_type_ids", pa.inta()),
("input_ids", pa.intaa()),
("other", pa.intaa()),
] , )
@pytest.mark.parametrize("sequence" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def lowercase ( a , a , a ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ :List[str] = pa.array(OptimizedTypedSequence(A__ , col=A__ ) )
assert get_base_dtype(arr.type ) == expected_dtype
# not in range
if col != "other":
# avoids errors due to in-place modifications
SCREAMING_SNAKE_CASE_ :List[Any] = copy.deepcopy(A__ )
SCREAMING_SNAKE_CASE_ :int = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
change_first_primitive_element_in_list(A__ , A__ )
SCREAMING_SNAKE_CASE_ :Optional[Any] = pa.array(OptimizedTypedSequence(A__ , col=A__ ) )
assert get_base_dtype(arr.type ) == pa.intaa()
@pytest.mark.parametrize("raise_exception" , [False, True] )
def lowercase ( a , a ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ :Dict = str(tmp_path / "dataset-train.arrow" )
try:
with ArrowWriter(path=A__ ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def lowercase ( a ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ :Optional[int] = "mock://dataset-train.arrow"
with ArrowWriter(path=A__ , storage_options=mockfs.storage_options ) as writer:
assert isinstance(writer._fs , type(A__ ) )
assert writer._fs.storage_options == mockfs.storage_options
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ :int = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert mockfs.exists(A__ )
def lowercase ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ :Dict = pa.BufferOutputStream()
with ParquetWriter(stream=A__ ) as writer:
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ :Optional[int] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
SCREAMING_SNAKE_CASE_ :Union[str, Any] = pa.BufferReader(output.getvalue() )
SCREAMING_SNAKE_CASE_ :Tuple = pq.read_table(A__ )
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize("embed_local_files" , [False, True] )
def lowercase ( a , a ):
'''simple docstring'''
import PIL.Image
SCREAMING_SNAKE_CASE_ :Union[str, Any] = str(tmp_path / "test_image_rgb.jpg" )
PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uinta ) ).save(A__ , format="png" )
SCREAMING_SNAKE_CASE_ :Any = pa.BufferOutputStream()
with ParquetWriter(
stream=A__ , features=Features({"image": Image()} ) , embed_local_files=A__ ) as writer:
writer.write({"image": image_path} )
writer.finalize()
SCREAMING_SNAKE_CASE_ :Tuple = pa.BufferReader(output.getvalue() )
SCREAMING_SNAKE_CASE_ :Optional[int] = pq.read_table(A__ )
SCREAMING_SNAKE_CASE_ :Any = pa_table.to_pydict()
if embed_local_files:
assert isinstance(out["image"][0]["path"] , A__ )
with open(A__ , "rb" ) as f:
assert out["image"][0]["bytes"] == f.read()
else:
assert out["image"][0]["path"] == image_path
assert out["image"][0]["bytes"] is None
def lowercase ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ :List[Any] = pa.schema([pa.field("col_1" , pa.string() , nullable=A__ )] )
SCREAMING_SNAKE_CASE_ :Tuple = pa.BufferOutputStream()
with ArrowWriter(stream=A__ ) as writer:
writer._build_writer(inferred_schema=A__ )
assert writer._schema == pa.schema([pa.field("col_1" , pa.string() )] )
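# Minimal end-to-end sketch of the write/read cycle the tests above exercise:
# two rows go through ArrowWriter into an in-memory buffer and come back as a
# pyarrow table (same pattern as _check_output; no new API assumed).
# stream = pa.BufferOutputStream()
# with ArrowWriter(stream=stream) as writer:
#     writer.write({"col_1": "foo", "col_2": 1})
#     writer.write({"col_1": "bar", "col_2": 2})
#     num_examples, num_bytes = writer.finalize()
# table = pa.ipc.open_stream(pa.BufferReader(stream.getvalue())).read_all()
# table.to_pydict()  # {'col_1': ['foo', 'bar'], 'col_2': [1, 2]}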
| 631
|
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid: Matrix = [
 [3, 0, 6, 5, 0, 8, 4, 0, 0],
 [5, 2, 0, 0, 0, 0, 0, 0, 0],
 [0, 8, 7, 0, 0, 0, 0, 3, 1],
 [0, 0, 3, 0, 1, 0, 0, 8, 0],
 [9, 0, 0, 8, 6, 3, 0, 0, 5],
 [0, 5, 0, 0, 9, 0, 6, 0, 0],
 [1, 3, 0, 0, 0, 0, 2, 5, 0],
 [0, 0, 0, 0, 0, 0, 0, 7, 4],
 [0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
 [5, 0, 6, 5, 0, 8, 4, 0, 3],
 [5, 2, 0, 0, 0, 0, 0, 0, 2],
 [1, 8, 7, 0, 0, 0, 0, 3, 1],
 [0, 0, 3, 0, 1, 0, 0, 8, 0],
 [9, 0, 0, 8, 6, 3, 0, 0, 5],
 [0, 5, 0, 0, 9, 0, 6, 0, 0],
 [1, 3, 0, 0, 0, 0, 2, 5, 0],
 [0, 0, 0, 0, 0, 0, 0, 7, 4],
 [0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe( grid: Matrix , row: int , column: int , n: int ):
 '''simple docstring'''
 for i in range(9 ):
 if grid[row][i] == n or grid[i][column] == n:
 return False
 for i in range(3 ):
 for j in range(3 ):
 if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
 return False
 return True
def find_empty_location( grid: Matrix ):
 '''simple docstring'''
 for i in range(9 ):
 for j in range(9 ):
 if grid[i][j] == 0:
 return i, j
 return None
def sudoku( grid: Matrix ):
 '''simple docstring'''
 if location := find_empty_location(grid ):
 row , column = location
 else:
 # If the location is ``None``, then the grid is solved.
 return grid
 for digit in range(1 , 10 ):
 if is_safe(grid , row , column , digit ):
 grid[row][column] = digit
 if sudoku(grid ) is not None:
 return grid
 grid[row][column] = 0
 return None
def print_solution( grid: Matrix ):
 '''simple docstring'''
 for row in grid:
 for cell in row:
 print(cell , end=''' ''' )
 print()
if __name__ == "__main__":
 # make a copy of grid so that you can compare with the unmodified grid
 for example_grid in (initial_grid, no_solution):
 print("\nExample grid:\n" + "=" * 20)
 print_solution(example_grid)
 print("\nExample grid solution:")
 solution = sudoku(example_grid)
 if solution is not None:
 print_solution(solution)
 else:
 print("Cannot find a solution.")
| 254
| 0
|
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_configure( config ):
    """simple docstring"""
    config.addinivalue_line(
        'markers' , 'is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested' )
    config.addinivalue_line(
        'markers' , 'is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested' )
    config.addinivalue_line('markers' , 'is_pipeline_test: mark test to run only when pipelines are tested' )
    config.addinivalue_line('markers' , 'is_staging_test: mark test to run only in the staging environment' )
    config.addinivalue_line('markers' , 'accelerate_tests: mark test that require accelerate' )
    config.addinivalue_line('markers' , 'tool_tests: mark the tool tests that are run on their specific schedule' )
def pytest_addoption( parser ):
    """simple docstring"""
    from transformers.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser )
def pytest_terminal_summary( terminalreporter ):
    """simple docstring"""
    from transformers.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption('--make-reports' )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
def pytest_sessionfinish( session , exitstatus ):
    """simple docstring"""
    # pytest exits with code 5 when no tests are collected; treat that as success
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag('IGNORE_RESULT')
OutputChecker = doctest.OutputChecker
class CustomOutputChecker( OutputChecker ):
    '''simple docstring'''
    def check_output( self , want , got , optionflags ):
        """simple docstring"""
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self , want , got , optionflags )
doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
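# Usage sketch for the IGNORE_RESULT flag registered above: a doctest can opt
# out of output comparison while still checking that the statement runs.
# >>> import uuid
# >>> print(uuid.uuid4())  # doctest: +IGNORE_RESULT
# 00000000-0000-0000-0000-000000000000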
| 714
|
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    '''simple docstring'''
    destination_vertex: int
    weight: int
class AdjacencyList:
    '''simple docstring'''
    def __init__( self , size ):
        """simple docstring"""
        self._graph: list[list[Edge]] = [[] for _ in range(size )]
        self._size = size
    def __getitem__( self , vertex ) -> Iterator[Edge]:
        """simple docstring"""
        return iter(self._graph[vertex] )
    @property
    def size( self ):
        """simple docstring"""
        return self._size
    def add_edge( self , from_vertex , to_vertex , weight ):
        """simple docstring"""
        if weight not in (0, 1):
            raise ValueError('Edge weight must be either 0 or 1.' )
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError('Vertex indexes must be in [0; size).' )
        self._graph[from_vertex].append(Edge(to_vertex , weight ) )
    def get_shortest_path( self , start_vertex , finish_vertex ):
        """simple docstring"""
        queue = deque([start_vertex] )
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0
        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue
            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance , int )
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex )
                else:
                    queue.append(edge.destination_vertex )
        if distances[finish_vertex] is None:
            raise ValueError('No path from start_vertex to finish_vertex.' )
        return distances[finish_vertex]
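# Runnable sketch: two routes from 0 to 2; the zero-weight detour through 1
# wins because 0-weight edges are pushed to the front of the deque.
g = AdjacencyList(3)
g.add_edge(0, 1, 0)
g.add_edge(1, 2, 0)
g.add_edge(0, 2, 1)
assert g.get_shortest_path(0, 2) == 0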
if __name__ == "__main__":
import doctest
doctest.testmod()
| 302
| 0
|
"""simple docstring"""
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowercase ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = RobertaTokenizer
__lowerCAmelCase = RobertaTokenizerFast
__lowerCAmelCase = True
__lowerCAmelCase = {'''cls_token''': '''<s>'''}
def _lowerCamelCase ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__a : Union[str, Any] = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
__a : List[str] = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) )
__a : int = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
__a : Optional[Any] = {'''unk_token''': '''<unk>'''}
__a : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
__a : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_UpperCAmelCase ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(_UpperCAmelCase ) )
def _lowerCamelCase ( self , **_UpperCAmelCase ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def _lowerCamelCase ( self , **_UpperCAmelCase ):
kwargs.update(self.special_tokens_map )
return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def _lowerCamelCase ( self , _UpperCAmelCase ):
__a : str = '''lower newer'''
__a : Any = '''lower newer'''
return input_text, output_text
def _lowerCamelCase ( self ):
__a : Dict = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
__a : int = '''lower newer'''
__a : int = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
__a : Optional[int] = tokenizer.tokenize(_UpperCAmelCase ) # , add_prefix_space=True)
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
__a : str = tokens + [tokenizer.unk_token]
__a : Union[str, Any] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , _UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : int = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=_UpperCAmelCase ) , [0, 31414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode('''Hello world! cécé herlolip 418''' , add_special_tokens=_UpperCAmelCase ) , [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2] , )
@slow
def _lowerCamelCase ( self ):
__a : List[str] = self.tokenizer_class.from_pretrained('''roberta-base''' )
__a : Optional[Any] = tokenizer.encode('''sequence builders''' , add_special_tokens=_UpperCAmelCase )
__a : Any = tokenizer.encode('''multi-sequence build''' , add_special_tokens=_UpperCAmelCase )
__a : List[Any] = tokenizer.encode(
'''sequence builders''' , add_special_tokens=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase )
__a : str = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase )
__a : str = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase )
__a : Tuple = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase , _UpperCAmelCase )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def _lowerCamelCase ( self ):
__a : Optional[Any] = self.get_tokenizer()
__a : str = '''Encode this sequence.'''
__a : List[Any] = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]]
# Testing encoder arguments
__a : Dict = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase )
__a : List[str] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(_UpperCAmelCase , _UpperCAmelCase )
__a : Union[str, Any] = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase )
__a : str = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} )
__a : Optional[int] = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
__a : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(_UpperCAmelCase , _UpperCAmelCase )
# Testing spaces after special tokens
__a : Dict = '''<mask>'''
tokenizer.add_special_tokens(
{'''mask_token''': AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase )} ) # mask token has a left space
__a : Optional[Any] = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
__a : List[Any] = '''Encode <mask> sequence'''
__a : Optional[Any] = '''Encode <mask>sequence'''
__a : Tuple = tokenizer.encode(_UpperCAmelCase )
__a : List[Any] = encoded.index(_UpperCAmelCase )
__a : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
__a : Dict = tokenizer.encode(_UpperCAmelCase )
__a : Tuple = encoded.index(_UpperCAmelCase )
__a : str = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(_UpperCAmelCase , _UpperCAmelCase )
def _lowerCamelCase ( self ):
pass
def _lowerCamelCase ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__a : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(_UpperCAmelCase , **_UpperCAmelCase )
__a : Tuple = self.tokenizer_class.from_pretrained(_UpperCAmelCase , **_UpperCAmelCase )
__a : List[str] = '''A, <mask> AllenNLP sentence.'''
__a : Dict = tokenizer_r.encode_plus(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase )
__a : Tuple = tokenizer_p.encode_plus(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
__a : int = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
__a : List[Any] = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
 # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(
_UpperCAmelCase , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
_UpperCAmelCase , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
def _lowerCamelCase ( self ):
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
__a : Any = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase , trim_offsets=_UpperCAmelCase )
__a : Tuple = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
__a : Optional[Any] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , _UpperCAmelCase )
self.assertEqual(post_processor_state['''add_prefix_space'''] , _UpperCAmelCase )
self.assertEqual(post_processor_state['''trim_offsets'''] , _UpperCAmelCase )
    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
        # `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)))

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)))

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)))

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)))

                text = f" {text}"

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)))

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)))

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)))
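# A standalone sketch of what the assertions above check (assumes a RoBERTa-style
# fast tokenizer such as "roberta-base"; illustrative only):
#
#     from transformers import RobertaTokenizerFast
#
#     tok = RobertaTokenizerFast.from_pretrained(
#         "roberta-base", add_prefix_space=True, trim_offsets=True)
#     enc = tok("hello hello", return_offsets_mapping=True, add_special_tokens=False)
#     # offsets exclude the injected prefix space: [(0, 5), (6, 11)]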
"""simple docstring"""
from __future__ import annotations
from random import choice
def __A ( a_ :Tuple) -> List[str]:
return choice(a_)
def __A ( a_ :list[int] , a_ :int) -> int:
__a : Optional[int] = random_pivot(a_)
# partition based on pivot
# linear time
__a : Union[str, Any] = [e for e in lst if e < pivot]
__a : Any = [e for e in lst if e > pivot]
# if we get lucky, pivot might be the element we want.
# we can easily see this:
# small (elements smaller than k)
# + pivot (kth element)
# + big (elements larger than k)
if len(a_) == k - 1:
return pivot
# pivot is in elements bigger than k
elif len(a_) < k - 1:
return kth_number(a_ , k - len(a_) - 1)
# pivot is in elements smaller than k
else:
return kth_number(a_ , a_)
if __name__ == "__main__":
import doctest
doctest.testmod()
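# A minimal usage sketch (not part of the original module): with distinct
# elements, kth_number returns the kth smallest value.
#
#     >>> kth_number([2, 1, 3, 4, 5], 3)
#     3
#     >>> kth_number([10, 7, 4], 1)
#     4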
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class UpperCamelCase_ ( A ):
'''simple docstring'''
a :torch.FloatTensor
class UpperCamelCase_ ( A , A ):
'''simple docstring'''
@register_to_config
def __init__( self , _UpperCAmelCase = 32 , _UpperCAmelCase = 64 , _UpperCAmelCase = 20 , _UpperCAmelCase = 768 , _UpperCAmelCase=77 , _UpperCAmelCase=4 , _UpperCAmelCase = 0.0 , _UpperCAmelCase = "silu" , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = "linear" , _UpperCAmelCase = "prd" , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , ):
super().__init__()
lowerCAmelCase_ = num_attention_heads
lowerCAmelCase_ = attention_head_dim
lowerCAmelCase_ = num_attention_heads * attention_head_dim
lowerCAmelCase_ = additional_embeddings
lowerCAmelCase_ = time_embed_dim or inner_dim
lowerCAmelCase_ = embedding_proj_dim or embedding_dim
lowerCAmelCase_ = clip_embed_dim or embedding_dim
lowerCAmelCase_ = Timesteps(_UpperCAmelCase , _UpperCAmelCase , 0)
lowerCAmelCase_ = TimestepEmbedding(_UpperCAmelCase , _UpperCAmelCase , out_dim=_UpperCAmelCase , act_fn=_UpperCAmelCase)
lowerCAmelCase_ = nn.Linear(_UpperCAmelCase , _UpperCAmelCase)
if embedding_proj_norm_type is None:
lowerCAmelCase_ = None
elif embedding_proj_norm_type == "layer":
lowerCAmelCase_ = nn.LayerNorm(_UpperCAmelCase)
else:
raise ValueError(f'unsupported embedding_proj_norm_type: {embedding_proj_norm_type}')
lowerCAmelCase_ = nn.Linear(_UpperCAmelCase , _UpperCAmelCase)
if encoder_hid_proj_type is None:
lowerCAmelCase_ = None
elif encoder_hid_proj_type == "linear":
lowerCAmelCase_ = nn.Linear(_UpperCAmelCase , _UpperCAmelCase)
else:
raise ValueError(f'unsupported encoder_hid_proj_type: {encoder_hid_proj_type}')
lowerCAmelCase_ = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , _UpperCAmelCase))
if added_emb_type == "prd":
lowerCAmelCase_ = nn.Parameter(torch.zeros(1 , 1 , _UpperCAmelCase))
elif added_emb_type is None:
lowerCAmelCase_ = None
else:
raise ValueError(
f'`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `\'prd\'` or `None`.')
lowerCAmelCase_ = nn.ModuleList(
[
BasicTransformerBlock(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , dropout=_UpperCAmelCase , activation_fn='''gelu''' , attention_bias=_UpperCAmelCase , )
for d in range(_UpperCAmelCase)
])
if norm_in_type == "layer":
lowerCAmelCase_ = nn.LayerNorm(_UpperCAmelCase)
elif norm_in_type is None:
lowerCAmelCase_ = None
else:
raise ValueError(f'Unsupported norm_in_type: {norm_in_type}.')
lowerCAmelCase_ = nn.LayerNorm(_UpperCAmelCase)
lowerCAmelCase_ = nn.Linear(_UpperCAmelCase , _UpperCAmelCase)
lowerCAmelCase_ = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -10000.0)
causal_attention_mask.triu_(1)
lowerCAmelCase_ = causal_attention_mask[None, ...]
self.register_buffer('''causal_attention_mask''' , _UpperCAmelCase , persistent=_UpperCAmelCase)
lowerCAmelCase_ = nn.Parameter(torch.zeros(1 , _UpperCAmelCase))
lowerCAmelCase_ = nn.Parameter(torch.zeros(1 , _UpperCAmelCase))
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def lowercase__ ( self):
lowerCAmelCase_ = {}
def fn_recursive_add_processors(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
if hasattr(_UpperCAmelCase , '''set_processor'''):
lowerCAmelCase_ = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f'{name}.{sub_name}' , _UpperCAmelCase , _UpperCAmelCase)
return processors
for name, module in self.named_children():
fn_recursive_add_processors(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
return processors
def lowercase__ ( self , _UpperCAmelCase):
lowerCAmelCase_ = len(self.attn_processors.keys())
if isinstance(_UpperCAmelCase , _UpperCAmelCase) and len(_UpperCAmelCase) != count:
raise ValueError(
f'A dict of processors was passed, but the number of processors {len(_UpperCAmelCase)} does not match the'
f' number of attention layers: {count}. Please make sure to pass {count} processor classes.')
def fn_recursive_attn_processor(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
if hasattr(_UpperCAmelCase , '''set_processor'''):
if not isinstance(_UpperCAmelCase , _UpperCAmelCase):
module.set_processor(_UpperCAmelCase)
else:
module.set_processor(processor.pop(f'{name}.processor'))
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f'{name}.{sub_name}' , _UpperCAmelCase , _UpperCAmelCase)
for name, module in self.named_children():
fn_recursive_attn_processor(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
def lowercase__ ( self):
self.set_attn_processor(AttnProcessor())
def lowercase__ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = True , ):
lowerCAmelCase_ = hidden_states.shape[0]
lowerCAmelCase_ = timestep
if not torch.is_tensor(_UpperCAmelCase):
lowerCAmelCase_ = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device)
elif torch.is_tensor(_UpperCAmelCase) and len(timesteps.shape) == 0:
lowerCAmelCase_ = timesteps[None].to(hidden_states.device)
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
lowerCAmelCase_ = timesteps * torch.ones(_UpperCAmelCase , dtype=timesteps.dtype , device=timesteps.device)
lowerCAmelCase_ = self.time_proj(_UpperCAmelCase)
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
lowerCAmelCase_ = timesteps_projected.to(dtype=self.dtype)
lowerCAmelCase_ = self.time_embedding(_UpperCAmelCase)
if self.embedding_proj_norm is not None:
lowerCAmelCase_ = self.embedding_proj_norm(_UpperCAmelCase)
lowerCAmelCase_ = self.embedding_proj(_UpperCAmelCase)
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
lowerCAmelCase_ = self.encoder_hidden_states_proj(_UpperCAmelCase)
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError('''`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set''')
lowerCAmelCase_ = self.proj_in(_UpperCAmelCase)
lowerCAmelCase_ = self.positional_embedding.to(hidden_states.dtype)
lowerCAmelCase_ = []
lowerCAmelCase_ = 0
if encoder_hidden_states is not None:
additional_embeds.append(_UpperCAmelCase)
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape) == 2:
lowerCAmelCase_ = proj_embeddings[:, None, :]
if len(hidden_states.shape) == 2:
lowerCAmelCase_ = hidden_states[:, None, :]
lowerCAmelCase_ = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
lowerCAmelCase_ = self.prd_embedding.to(hidden_states.dtype).expand(_UpperCAmelCase , -1 , -1)
additional_embeds.append(_UpperCAmelCase)
lowerCAmelCase_ = torch.cat(
_UpperCAmelCase , dim=1 , )
# Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens
lowerCAmelCase_ = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
lowerCAmelCase_ = F.pad(
_UpperCAmelCase , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
lowerCAmelCase_ = hidden_states + positional_embeddings
if attention_mask is not None:
lowerCAmelCase_ = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
lowerCAmelCase_ = F.pad(_UpperCAmelCase , (0, self.additional_embeddings) , value=0.0)
lowerCAmelCase_ = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
lowerCAmelCase_ = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0)
if self.norm_in is not None:
lowerCAmelCase_ = self.norm_in(_UpperCAmelCase)
for block in self.transformer_blocks:
lowerCAmelCase_ = block(_UpperCAmelCase , attention_mask=_UpperCAmelCase)
lowerCAmelCase_ = self.norm_out(_UpperCAmelCase)
if self.prd_embedding is not None:
lowerCAmelCase_ = hidden_states[:, -1]
else:
lowerCAmelCase_ = hidden_states[:, additional_embeddings_len:]
lowerCAmelCase_ = self.proj_to_clip_embeddings(_UpperCAmelCase)
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=_UpperCAmelCase)
def lowercase__ ( self , _UpperCAmelCase):
lowerCAmelCase_ = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
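# A minimal usage sketch. Assumption: this obfuscated file is diffusers'
# PriorTransformer, so the public class name, constructor keywords, and
# forward signature below are taken from that library rather than from the
# mangled names above (shapes chosen to match the tiny config):
#
#     import torch
#     from diffusers import PriorTransformer
#
#     prior = PriorTransformer(num_attention_heads=2, attention_head_dim=4,
#                              num_layers=2, embedding_dim=8, num_embeddings=4,
#                              additional_embeddings=4)
#     sample = torch.randn(1, 8)            # noisy CLIP image embedding
#     timestep = torch.tensor([1])
#     proj = torch.randn(1, 8)              # projected conditioning embedding
#     enc = torch.randn(1, 4, 8)            # text encoder hidden states
#     out = prior(sample, timestep, proj_embedding=proj, encoder_hidden_states=enc)
#     out.predicted_image_embedding.shape   # -> torch.Size([1, 8])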
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
_snake_case = logging.get_logger(__name__)
def cosine_distance(image_embeds, text_embeds):
    """Cosine similarity between every image embedding and every concept embedding."""
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())
class UpperCamelCase_ ( A ):
'''simple docstring'''
a :Tuple = CLIPConfig
a :int = ['CLIPEncoderLayer']
def __init__( self , _UpperCAmelCase):
super().__init__(_UpperCAmelCase)
lowerCAmelCase_ = CLIPVisionModel(config.vision_config)
lowerCAmelCase_ = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=_UpperCAmelCase)
lowerCAmelCase_ = nn.Parameter(torch.ones(17 , config.projection_dim) , requires_grad=_UpperCAmelCase)
lowerCAmelCase_ = nn.Parameter(torch.ones(3 , config.projection_dim) , requires_grad=_UpperCAmelCase)
lowerCAmelCase_ = nn.Parameter(torch.ones(17) , requires_grad=_UpperCAmelCase)
lowerCAmelCase_ = nn.Parameter(torch.ones(3) , requires_grad=_UpperCAmelCase)
@torch.no_grad()
def lowercase__ ( self , _UpperCAmelCase , _UpperCAmelCase):
lowerCAmelCase_ = self.vision_model(_UpperCAmelCase)[1] # pooled_output
lowerCAmelCase_ = self.visual_projection(_UpperCAmelCase)
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowerCAmelCase_ = cosine_distance(_UpperCAmelCase , self.special_care_embeds).cpu().float().numpy()
lowerCAmelCase_ = cosine_distance(_UpperCAmelCase , self.concept_embeds).cpu().float().numpy()
lowerCAmelCase_ = []
lowerCAmelCase_ = image_embeds.shape[0]
for i in range(_UpperCAmelCase):
lowerCAmelCase_ = {'''special_scores''': {}, '''special_care''': [], '''concept_scores''': {}, '''bad_concepts''': []}
# increase this value to create a stronger `nfsw` filter
# at the cost of increasing the possibility of filtering benign images
lowerCAmelCase_ = 0.0
for concept_idx in range(len(special_cos_dist[0])):
lowerCAmelCase_ = special_cos_dist[i][concept_idx]
lowerCAmelCase_ = self.special_care_embeds_weights[concept_idx].item()
lowerCAmelCase_ = round(concept_cos - concept_threshold + adjustment , 3)
if result_img["special_scores"][concept_idx] > 0:
result_img["special_care"].append({concept_idx, result_img['''special_scores'''][concept_idx]})
lowerCAmelCase_ = 0.01
for concept_idx in range(len(cos_dist[0])):
lowerCAmelCase_ = cos_dist[i][concept_idx]
lowerCAmelCase_ = self.concept_embeds_weights[concept_idx].item()
lowerCAmelCase_ = round(concept_cos - concept_threshold + adjustment , 3)
if result_img["concept_scores"][concept_idx] > 0:
result_img["bad_concepts"].append(_UpperCAmelCase)
result.append(_UpperCAmelCase)
lowerCAmelCase_ = [len(res['''bad_concepts''']) > 0 for res in result]
return images, has_nsfw_concepts
@torch.no_grad()
def lowercase__ ( self , _UpperCAmelCase , _UpperCAmelCase):
lowerCAmelCase_ = self.vision_model(_UpperCAmelCase)[1] # pooled_output
lowerCAmelCase_ = self.visual_projection(_UpperCAmelCase)
lowerCAmelCase_ = cosine_distance(_UpperCAmelCase , self.special_care_embeds)
lowerCAmelCase_ = cosine_distance(_UpperCAmelCase , self.concept_embeds)
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
lowerCAmelCase_ = 0.0
lowerCAmelCase_ = special_cos_dist - self.special_care_embeds_weights + adjustment
# special_scores = special_scores.round(decimals=3)
lowerCAmelCase_ = torch.any(special_scores > 0 , dim=1)
lowerCAmelCase_ = special_care * 0.01
lowerCAmelCase_ = special_adjustment.unsqueeze(1).expand(-1 , cos_dist.shape[1])
lowerCAmelCase_ = (cos_dist - self.concept_embeds_weights) + special_adjustment
# concept_scores = concept_scores.round(decimals=3)
lowerCAmelCase_ = torch.any(concept_scores > 0 , dim=1)
return images, has_nsfw_concepts
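# A minimal sketch of the distance helper above (illustrative shapes only):
#
#     import torch
#     img = torch.randn(2, 512)         # two image embeddings
#     concepts = torch.randn(17, 512)   # concept embeddings
#     scores = cosine_distance(img, concepts)  # shape (2, 17), values in [-1, 1]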
'''simple docstring'''
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset(IterableDataset):
    def __init__(self, tokenizer, dataset, seq_length=1_024, num_of_sequences=1_024, chars_per_token=3.6):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences

    def __iter__(self):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)["content"])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = tokenizer(buffer, truncation=False)["input_ids"]
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)


def create_dataloader(args):
    ds_kwargs = {"streaming": True}
    valid_data = load_dataset(args.dataset_name, split="train", **ds_kwargs)
    valid_dataset = ConstantLengthDataset(tokenizer, valid_data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size)
    return eval_dataloader


def evaluate(args):
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float("inf")
    return loss.item(), perplexity.item()


# Setup Accelerator
accelerator = Accelerator()

# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)

# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)

# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)

# Load dataset and dataloader
eval_dataloader = create_dataloader(args)

# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)

# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
eval_loss, perplexity = evaluate(args)
logger.info(f"loss/eval: {eval_loss}, perplexity: {perplexity}")
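# Example invocation (the checkpoint and dataset names below are illustrative,
# not mandated by the script):
#
#     python validation_loss.py --model_ckpt codeparrot/codeparrot \
#         --dataset_name codeparrot/codeparrot-clean-valid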
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
snake_case : Optional[int] = get_logger(__name__)
snake_case : Union[str, Any] = R"\n Args:\n input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n search or log softmax for each vocabulary token when using beam search\n kwargs (`Dict[str, Any]`, *optional*):\n Additional logits processor specific kwargs.\n\n Return:\n `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n"
class _snake_case :
@add_start_docstrings(_a )
def __call__( self , _a , _a ):
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class _snake_case :
@add_start_docstrings(_a )
def __call__( self , _a , _a ):
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class _snake_case ( snake_case ):
@add_start_docstrings(_a )
def __call__( self , _a , _a , _a , **_a ):
for processor in self:
__magic_name__ : str = inspect.signature(processor.__call__ ).parameters
if len(_a ) > 3:
if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ):
raise ValueError(
f'''Make sure that all the required parameters: {list(function_args.keys() )} for '''
f'''{processor.__class__} are passed to the logits processor.''' )
__magic_name__ : Tuple = processor(_a , _a , _a , **_a )
else:
__magic_name__ : int = processor(_a , _a , _a )
return scores
class _snake_case ( snake_case ):
def __init__( self , _a ):
if not isinstance(_a , _a ) or not (temperature > 0):
raise ValueError(f'''`temperature` has to be a strictly positive float, but is {temperature}''' )
__magic_name__ : str = temperature
def __call__( self , _a , _a , _a ):
__magic_name__ : List[Any] = scores / self.temperature
return scores
class _snake_case ( snake_case ):
def __init__( self , _a , _a = -float("Inf" ) , _a = 1 ):
if not isinstance(_a , _a ) or (top_p < 0 or top_p > 1.0):
raise ValueError(f'''`top_p` has to be a float > 0 and < 1, but is {top_p}''' )
if not isinstance(_a , _a ) or (min_tokens_to_keep < 1):
raise ValueError(f'''`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}''' )
__magic_name__ : List[str] = top_p
__magic_name__ : Optional[Any] = filter_value
__magic_name__ : Tuple = min_tokens_to_keep
def __call__( self , _a , _a , _a ):
__magic_name__ , __magic_name__ : str = lax.top_k(_a , scores.shape[-1] )
__magic_name__ : Dict = jnp.full_like(_a , self.filter_value )
__magic_name__ : Union[str, Any] = jax.nn.softmax(_a , axis=-1 ).cumsum(axis=-1 )
__magic_name__ : int = cumulative_probs < self.top_p
# include the token that is higher than top_p as well
__magic_name__ : Tuple = jnp.roll(_a , 1 )
score_mask |= score_mask.at[:, 0].set(_a )
# min tokens to keep
__magic_name__ : int = score_mask.at[:, : self.min_tokens_to_keep].set(_a )
__magic_name__ : Any = jnp.where(_a , _a , _a )
__magic_name__ : Union[str, Any] = jax.lax.sort_key_val(_a , _a )[-1]
return next_scores
class _snake_case ( snake_case ):
def __init__( self , _a , _a = -float("Inf" ) , _a = 1 ):
if not isinstance(_a , _a ) or top_k <= 0:
raise ValueError(f'''`top_k` has to be a strictly positive integer, but is {top_k}''' )
__magic_name__ : str = max(_a , _a )
__magic_name__ : Any = filter_value
def __call__( self , _a , _a , _a ):
__magic_name__ , __magic_name__ : Optional[Any] = scores.shape
__magic_name__ : Union[str, Any] = jnp.full(batch_size * vocab_size , self.filter_value )
__magic_name__ : Tuple = min(self.top_k , scores.shape[-1] ) # Safety check
__magic_name__ , __magic_name__ : str = lax.top_k(_a , _a )
__magic_name__ : Optional[int] = jnp.broadcast_to((jnp.arange(_a ) * vocab_size)[:, None] , (batch_size, topk) ).flatten()
__magic_name__ : int = topk_scores.flatten()
__magic_name__ : List[Any] = topk_indices.flatten() + shift
__magic_name__ : Optional[Any] = next_scores_flat.at[topk_indices_flat].set(_a )
__magic_name__ : List[Any] = next_scores_flat.reshape(_a , _a )
return next_scores
class _snake_case ( snake_case ):
def __init__( self , _a ):
__magic_name__ : Dict = bos_token_id
def __call__( self , _a , _a , _a ):
__magic_name__ : Optional[int] = jnp.full(scores.shape , -float("inf" ) )
__magic_name__ : Union[str, Any] = 1 - jnp.bool_(cur_len - 1 )
__magic_name__ : Dict = jnp.where(_a , new_scores.at[:, self.bos_token_id].set(0 ) , _a )
return scores
class _snake_case ( snake_case ):
def __init__( self , _a , _a ):
__magic_name__ : Tuple = max_length
__magic_name__ : int = eos_token_id
def __call__( self , _a , _a , _a ):
__magic_name__ : Dict = jnp.full(scores.shape , -float("inf" ) )
__magic_name__ : Union[str, Any] = 1 - jnp.bool_(cur_len - self.max_length + 1 )
__magic_name__ : Dict = jnp.where(_a , new_scores.at[:, self.eos_token_id].set(0 ) , _a )
return scores
class _snake_case ( snake_case ):
def __init__( self , _a , _a ):
if not isinstance(_a , _a ) or min_length < 0:
raise ValueError(f'''`min_length` has to be a positive integer, but is {min_length}''' )
if not isinstance(_a , _a ) or eos_token_id < 0:
raise ValueError(f'''`eos_token_id` has to be a positive integer, but is {eos_token_id}''' )
__magic_name__ : int = min_length
__magic_name__ : Any = eos_token_id
def __call__( self , _a , _a , _a ):
# create boolean flag to decide if min length penalty should be applied
__magic_name__ : Optional[Any] = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 )
__magic_name__ : List[Any] = jnp.where(_a , scores.at[:, self.eos_token_id].set(-float("inf" ) ) , _a )
return scores
class _snake_case ( snake_case ):
def __init__( self , _a , _a ):
__magic_name__ : Dict = list(_a )
__magic_name__ : Union[str, Any] = begin_index
def __call__( self , _a , _a , _a ):
__magic_name__ : Any = 1 - jnp.bool_(cur_len - self.begin_index )
__magic_name__ : Dict = jnp.where(_a , scores.at[:, self.begin_suppress_tokens].set(-float("inf" ) ) , _a )
return scores
class _snake_case ( snake_case ):
def __init__( self , _a ):
__magic_name__ : List[Any] = list(_a )
def __call__( self , _a , _a , _a ):
__magic_name__ : Union[str, Any] = scores.at[..., self.suppress_tokens].set(-float("inf" ) )
return scores
class _snake_case ( snake_case ):
def __init__( self , _a ):
__magic_name__ : Tuple = dict(_a )
# Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
# index of the array corresponds to the index of the token to be forced, for XLA compatibility.
# Indexes without forced tokens will have a negative value.
__magic_name__ : Optional[int] = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.intaa ) * -1
for index, token in force_token_map.items():
if token is not None:
__magic_name__ : Any = force_token_array.at[index].set(_a )
__magic_name__ : List[Any] = jnp.intaa(_a )
def __call__( self , _a , _a , _a ):
def _force_token(_a ):
__magic_name__ : Any = scores.shape[0]
__magic_name__ : int = self.force_token_array[generation_idx]
__magic_name__ : Union[str, Any] = jnp.ones_like(_a , dtype=scores.dtype ) * -float("inf" )
__magic_name__ : str = jnp.zeros((batch_size, 1) , dtype=scores.dtype )
__magic_name__ : Any = lax.dynamic_update_slice(_a , _a , (0, current_token) )
return new_scores
__magic_name__ : Any = lax.cond(
cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond(
self.force_token_array[cur_len] >= 0 , lambda: _force_token(_a ) , lambda: scores , ) , )
return scores
class _snake_case ( snake_case ):
def __init__( self , _a , _a , _a ):
__magic_name__ : Optional[int] = generate_config.eos_token_id
__magic_name__ : List[Any] = generate_config.no_timestamps_token_id
__magic_name__ : Optional[Any] = generate_config.no_timestamps_token_id + 1
__magic_name__ : int = decoder_input_length + 1
if generate_config.is_multilingual:
# room for language token and task token
self.begin_index += 2
if hasattr(_a , "max_initial_timestamp_index" ):
__magic_name__ : List[Any] = generate_config.max_initial_timestamp_index
else:
__magic_name__ : str = model_config.vocab_size
if self.max_initial_timestamp_index is None:
__magic_name__ : str = model_config.vocab_size
def __call__( self , _a , _a , _a ):
# suppress <|notimestamps|> which is handled by without_timestamps
__magic_name__ : List[str] = scores.at[:, self.no_timestamps_token_id].set(-float("inf" ) )
def handle_pairs(_a , _a ):
__magic_name__ : int = jnp.where((cur_len - self.begin_index) >= 1 , _a , _a )
__magic_name__ : str = jnp.where(
input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , _a , )
__magic_name__ : Dict = jnp.where((cur_len - self.begin_index) < 2 , _a , _a )
__magic_name__ : Any = jnp.where(
input_ids_k[cur_len - 2] >= self.timestamp_begin , _a , _a , )
return jnp.where(
_a , jnp.where(
penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float("inf" ) ) , scores_k.at[: self.eos_token_id].set(-float("inf" ) ) , ) , _a , )
__magic_name__ : Any = jax.vmap(_a )(_a , _a )
__magic_name__ : Dict = jnp.where(cur_len == self.begin_index , _a , _a )
__magic_name__ : List[str] = jnp.where(
self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , _a , )
__magic_name__ : Union[str, Any] = self.timestamp_begin + self.max_initial_timestamp_index
__magic_name__ : Tuple = jnp.where(
_a , scores.at[:, last_allowed + 1 :].set(-float("inf" ) ) , _a , )
# if sum of probability over timestamps is above any other token, sample timestamp
__magic_name__ : Union[str, Any] = jax.nn.log_softmax(_a , axis=-1 )
def handle_cumulative_probs(_a , _a ):
__magic_name__ : int = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 )
__magic_name__ : int = jnp.max(logprobs_k[: self.timestamp_begin] )
return jnp.where(
timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float("inf" ) ) , _a , )
__magic_name__ : int = jax.vmap(_a )(_a , _a )
return scores
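# A standalone sketch of the top-k warping idea implemented above (it mirrors
# the logic rather than calling the file's obfuscated classes):
#
#     import jax.numpy as jnp
#     from jax import lax
#
#     def top_k_warp(scores, k, filter_value=-float("inf")):
#         topk_scores, _ = lax.top_k(scores, k)   # (batch, k), sorted descending
#         cutoff = topk_scores[:, -1][:, None]    # smallest logit that survives
#         return jnp.where(scores < cutoff, filter_value, scores)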
from __future__ import annotations
def simple_interest(
    principal: float, daily_interest_rate: float, days_between_payments: float
) -> float:
    if days_between_payments <= 0:
        raise ValueError("days_between_payments must be > 0")
    if daily_interest_rate < 0:
        raise ValueError("daily_interest_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return principal * daily_interest_rate * days_between_payments


def compound_interest(
    principal: float,
    nominal_annual_interest_rate_percentage: float,
    number_of_compounding_periods: float,
) -> float:
    if number_of_compounding_periods <= 0:
        raise ValueError("number_of_compounding_periods must be > 0")
    if nominal_annual_interest_rate_percentage < 0:
        raise ValueError("nominal_annual_interest_rate_percentage must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return principal * (
        (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
        - 1
    )


def apr_interest(
    principal: float,
    nominal_annual_percentage_rate: float,
    number_of_years: float,
) -> float:
    if number_of_years <= 0:
        raise ValueError("number_of_years must be > 0")
    if nominal_annual_percentage_rate < 0:
        raise ValueError("nominal_annual_percentage_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return compound_interest(
        principal, nominal_annual_percentage_rate / 365, number_of_years * 365
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
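# A quick worked example (values chosen purely for illustration):
#
#     >>> simple_interest(18000.0, 0.06, 3)
#     3240.0
#     >>> round(compound_interest(10000.0, 0.05, 3), 2)
#     1576.25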
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_lowerCAmelCase : int = logging.get_logger(__name__)
class A_ ( _a ):
lowerCAmelCase__ = ['pixel_values']
def __init__( self: List[str] ,__lowerCAmelCase: bool = True ,__lowerCAmelCase: Dict[str, int] = None ,__lowerCAmelCase: PILImageResampling = PILImageResampling.BICUBIC ,__lowerCAmelCase: bool = True ,__lowerCAmelCase: Union[int, float] = 1 / 255 ,__lowerCAmelCase: bool = True ,__lowerCAmelCase: Optional[Union[float, List[float]]] = None ,__lowerCAmelCase: Optional[Union[float, List[float]]] = None ,__lowerCAmelCase: bool = True ,**__lowerCAmelCase: Optional[int] ,):
'''simple docstring'''
super().__init__(**__lowerCAmelCase )
_lowerCamelCase : int = size if size is not None else {"height": 384, "width": 384}
_lowerCamelCase : Optional[int] = get_size_dict(__lowerCAmelCase ,default_to_square=__lowerCAmelCase )
_lowerCamelCase : str = do_resize
_lowerCamelCase : List[str] = size
_lowerCamelCase : Optional[Any] = resample
_lowerCamelCase : str = do_rescale
_lowerCamelCase : int = rescale_factor
_lowerCamelCase : int = do_normalize
_lowerCamelCase : Dict = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
_lowerCamelCase : Tuple = image_std if image_std is not None else OPENAI_CLIP_STD
_lowerCamelCase : Optional[Any] = do_convert_rgb
def _lowercase ( self: Dict ,__lowerCAmelCase: np.ndarray ,__lowerCAmelCase: Dict[str, int] ,__lowerCAmelCase: PILImageResampling = PILImageResampling.BICUBIC ,__lowerCAmelCase: Optional[Union[str, ChannelDimension]] = None ,**__lowerCAmelCase: Optional[Any] ,):
'''simple docstring'''
_lowerCamelCase : Optional[int] = get_size_dict(__lowerCAmelCase ,default_to_square=__lowerCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""" )
_lowerCamelCase : Dict = (size["height"], size["width"])
return resize(__lowerCAmelCase ,size=__lowerCAmelCase ,resample=__lowerCAmelCase ,data_format=__lowerCAmelCase ,**__lowerCAmelCase )
def _lowercase ( self: Tuple ,__lowerCAmelCase: np.ndarray ,__lowerCAmelCase: Union[int, float] ,__lowerCAmelCase: Optional[Union[str, ChannelDimension]] = None ,**__lowerCAmelCase: List[str] ,):
'''simple docstring'''
return rescale(__lowerCAmelCase ,scale=__lowerCAmelCase ,data_format=__lowerCAmelCase ,**__lowerCAmelCase )
def _lowercase ( self: Optional[Any] ,__lowerCAmelCase: np.ndarray ,__lowerCAmelCase: Union[float, List[float]] ,__lowerCAmelCase: Union[float, List[float]] ,__lowerCAmelCase: Optional[Union[str, ChannelDimension]] = None ,**__lowerCAmelCase: int ,):
'''simple docstring'''
return normalize(__lowerCAmelCase ,mean=__lowerCAmelCase ,std=__lowerCAmelCase ,data_format=__lowerCAmelCase ,**__lowerCAmelCase )
def _lowercase ( self: Tuple ,__lowerCAmelCase: ImageInput ,__lowerCAmelCase: Optional[bool] = None ,__lowerCAmelCase: Optional[Dict[str, int]] = None ,__lowerCAmelCase: PILImageResampling = None ,__lowerCAmelCase: Optional[bool] = None ,__lowerCAmelCase: Optional[float] = None ,__lowerCAmelCase: Optional[bool] = None ,__lowerCAmelCase: Optional[Union[float, List[float]]] = None ,__lowerCAmelCase: Optional[Union[float, List[float]]] = None ,__lowerCAmelCase: Optional[Union[str, TensorType]] = None ,__lowerCAmelCase: bool = None ,__lowerCAmelCase: ChannelDimension = ChannelDimension.FIRST ,**__lowerCAmelCase: Tuple ,):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = do_resize if do_resize is not None else self.do_resize
_lowerCamelCase : Optional[int] = resample if resample is not None else self.resample
_lowerCamelCase : List[str] = do_rescale if do_rescale is not None else self.do_rescale
_lowerCamelCase : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowerCamelCase : Tuple = do_normalize if do_normalize is not None else self.do_normalize
_lowerCamelCase : int = image_mean if image_mean is not None else self.image_mean
_lowerCamelCase : Dict = image_std if image_std is not None else self.image_std
_lowerCamelCase : int = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
_lowerCamelCase : Dict = size if size is not None else self.size
_lowerCamelCase : Tuple = get_size_dict(__lowerCAmelCase ,default_to_square=__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = make_list_of_images(__lowerCAmelCase )
if not valid_images(__lowerCAmelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None or resample is None:
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
_lowerCamelCase : Union[str, Any] = [convert_to_rgb(__lowerCAmelCase ) for image in images]
# All transformations expect numpy arrays.
_lowerCamelCase : List[Any] = [to_numpy_array(__lowerCAmelCase ) for image in images]
if do_resize:
_lowerCamelCase : Tuple = [self.resize(image=__lowerCAmelCase ,size=__lowerCAmelCase ,resample=__lowerCAmelCase ) for image in images]
if do_rescale:
_lowerCamelCase : Optional[int] = [self.rescale(image=__lowerCAmelCase ,scale=__lowerCAmelCase ) for image in images]
if do_normalize:
_lowerCamelCase : Dict = [self.normalize(image=__lowerCAmelCase ,mean=__lowerCAmelCase ,std=__lowerCAmelCase ) for image in images]
_lowerCamelCase : Any = [to_channel_dimension_format(__lowerCAmelCase ,__lowerCAmelCase ) for image in images]
_lowerCamelCase : str = BatchFeature(data={"pixel_values": images} ,tensor_type=__lowerCAmelCase )
return encoded_outputs
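# A minimal usage sketch. Assumption: this obfuscated file is transformers'
# BlipImageProcessor, so the public class name below comes from that library,
# not from the mangled class name above:
#
#     from PIL import Image
#     from transformers import BlipImageProcessor
#
#     processor = BlipImageProcessor()
#     image = Image.new("RGB", (640, 480))
#     pixel_values = processor(images=image, return_tensors="pt").pixel_values
#     # -> tensor of shape (1, 3, 384, 384), resized per the default size dict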
from scipy.stats import spearmanr
import datasets
_DESCRIPTION = '\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions (`List[float]`): Predicted labels, as returned by a model.\n    references (`List[float]`): Ground truth labels.\n    return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n        only the spearmanr score. Defaults to `False`.\nReturns:\n    spearmanr (`float`): Spearman correlation coefficient.\n    p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n    Example 1:\n        >>> spearmanr_metric = datasets.load_metric("spearmanr")\n        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n        >>> print(results)\n        {\'spearmanr\': -0.7}\n\n    Example 2:\n        >>> spearmanr_metric = datasets.load_metric("spearmanr")\n        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n        ...                                    predictions=[10, 9, 2.5, 6, 4],\n        ...                                    return_pvalue=True)\n        >>> print(results[\'spearmanr\'])\n        -0.7\n        >>> print(round(results[\'spearmanr_pvalue\'], 2))\n        0.19\n'
_CITATION = r'\n@book{kokoska2000crc,\n    title={CRC standard probability and statistics tables and formulae},\n    author={Kokoska, Stephen and Zwillinger, Daniel},\n    year={2000},\n    publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n    author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n              Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n              Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n              Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n              Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n              Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n              Kern, Robert and Larson, Eric and Carey, C J and\n              Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n              {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n              Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n              Harris, Charles R. and Archibald, Anne M. and\n              Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and\n              {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n    title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n              Computing in Python}},\n    journal = {Nature Methods},\n    year = {2020},\n    volume = {17},\n    pages = {261--272},\n    adsurl = {https://rdcu.be/b08Wh},\n    doi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Spearmanr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self , _a , _a=1_3 , _a=3 , _a=True , _a=True , _a=0.1 , _a=0.1 , _a=2_2_4 , _a=1_0_0_0 , _a=[3, 3, 6, 4] , _a=[4_8, 5_6, 1_1_2, 2_2_0] , ) -> Tuple:
_a : Dict = parent
_a : Optional[int] = batch_size
_a : Optional[Any] = num_channels
_a : Union[str, Any] = is_training
_a : Tuple = use_labels
_a : Dict = hidden_dropout_prob
_a : List[Any] = attention_probs_dropout_prob
_a : Dict = num_labels
_a : List[str] = image_size
_a : Dict = layer_depths
_a : str = embed_dims
def __lowercase ( self ) -> Optional[Any]:
_a : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a : int = None
if self.use_labels:
_a : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels )
_a : Dict = self.get_config()
return config, pixel_values, labels
def __lowercase ( self ) -> int:
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act='''gelu''' , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=_a , layer_scale_init_value=1e-5 , )
def __lowercase ( self , _a , _a , _a ) -> str:
_a : List[Any] = SwiftFormerModel(config=_a )
model.to(_a )
model.eval()
_a : Optional[int] = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def __lowercase ( self , _a , _a , _a ) -> Optional[Any]:
_a : List[str] = self.num_labels
_a : Optional[int] = SwiftFormerForImageClassification(_a )
model.to(_a )
model.eval()
_a : List[str] = model(_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
_a : Union[str, Any] = SwiftFormerForImageClassification(_a )
model.to(_a )
model.eval()
_a : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a : Optional[Any] = model(_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowercase ( self ) -> Tuple:
((_a) , (_a) , (_a)) : Optional[int] = self.prepare_config_and_inputs()
_a : List[Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( __lowercase , __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
UpperCAmelCase__ : Optional[int] = (
{"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
UpperCAmelCase__ : Optional[Any] = False
UpperCAmelCase__ : str = False
UpperCAmelCase__ : Tuple = False
UpperCAmelCase__ : Tuple = False
UpperCAmelCase__ : str = False
def __lowercase ( self ) -> Optional[int]:
_a : Union[str, Any] = SwiftFormerModelTester(self )
_a : int = ConfigTester(
self , config_class=_a , has_text_modality=_a , hidden_size=3_7 , num_attention_heads=1_2 , num_hidden_layers=1_2 , )
def __lowercase ( self ) -> int:
self.config_tester.run_common_tests()
@unittest.skip(reason='''SwiftFormer does not use inputs_embeds''' )
def __lowercase ( self ) -> Union[str, Any]:
pass
def __lowercase ( self ) -> Dict:
_a , _a : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : Any = model_class(_a )
_a : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_a , nn.Linear ) )
def __lowercase ( self ) -> str:
_a , _a : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : Optional[int] = model_class(_a )
_a : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a : Tuple = [*signature.parameters.keys()]
_a : List[str] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _a )
def __lowercase ( self ) -> int:
_a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __lowercase ( self ) -> Optional[int]:
_a : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def __lowercase ( self ) -> Optional[Any]:
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a : Any = SwiftFormerModel.from_pretrained(_a )
self.assertIsNotNone(_a )
@unittest.skip(reason='''SwiftFormer does not output attentions''' )
def __lowercase ( self ) -> List[Any]:
pass
def __lowercase ( self ) -> int:
def check_hidden_states_output(_a , _a , _a ):
_a : Optional[int] = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
_a : Union[str, Any] = model(**self._prepare_for_class(_a , _a ) )
_a : Optional[Any] = outputs.hidden_states
_a : Union[str, Any] = 8
self.assertEqual(len(_a ) , _a ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(_a ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
_a , _a : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : str = True
check_hidden_states_output(_a , _a , _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_a : List[str] = True
check_hidden_states_output(_a , _a , _a )
def __lowercase ( self ) -> str:
def _config_zero_init(_a ):
_a : List[Any] = copy.deepcopy(_a )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(_a , _a , 1e-1_0 )
if isinstance(getattr(_a , _a , _a ) , _a ):
_a : int = _config_zero_init(getattr(_a , _a ) )
setattr(_a , _a , _a )
return configs_no_init
_a , _a : Any = self.model_tester.prepare_config_and_inputs_for_common()
_a : Dict = _config_zero_init(_a )
for model_class in self.all_model_classes:
_a : Dict = model_class(config=_a )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9) / 1e9).round().item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __lowercase ( self ) -> Optional[Any]:
pass
def __UpperCAmelCase ( ) -> Optional[Any]:
"""simple docstring"""
_a : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __lowercase ( self ) -> str:
return ViTImageProcessor.from_pretrained('''MBZUAI/swiftformer-xs''' ) if is_vision_available() else None
@slow
def __lowercase ( self ) -> Dict:
_a : Any = SwiftFormerForImageClassification.from_pretrained('''MBZUAI/swiftformer-xs''' ).to(_a )
_a : Any = self.default_image_processor
_a : Any = prepare_img()
_a : Any = image_processor(images=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
_a : Optional[Any] = model(**_a )
# verify the logits
_a : List[str] = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , _a )
_a : int = torch.tensor([[-2.1_7_0_3e0_0, 2.1_1_0_7e0_0, -2.0_8_1_1e0_0]] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1e-4 ) )
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
    # replace left string with right string to get the relevant state_dict key (identical state dict to bart)
    ["memory_attention", "encoder_attn"],
    ["attention", "attn"],
    ["/", "."],
    [".LayerNorm.gamma", "_layer_norm.weight"],
    [".LayerNorm.beta", "_layer_norm.bias"],
    ["r.layer_", "r.layers."],
    ["output_proj", "out_proj"],
    ["ffn.dense_1.", "fc2."],
    ["ffn.dense.", "fc1."],
    ["ffn_layer_norm", "final_layer_norm"],
    ["kernel", "weight"],
    ["encoder_layer_norm.", "encoder.layer_norm."],
    ["decoder_layer_norm.", "decoder.layer_norm."],
    ["embeddings.weights", "shared.weight"],
]


def rename_state_dict_key(k: str) -> str:
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k


def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model


def get_tf_weights_as_numpy(path: str = "./ckpt/aeslc/model.ckpt-32000") -> Dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["Adafactor", "global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights


def convert_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str):
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)
    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
    convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
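# Example invocation (the checkpoint path is the script's own default; the
# output directory is illustrative):
#
#     python convert_pegasus_tf_to_pytorch.py ./ckpt/aeslc/model.ckpt-32000 pegasus/aeslc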
"""Sum the semidivisible numbers not exceeding a limit (Project Euler 234).

For n >= 4, lps(n) is the largest prime <= sqrt(n) and ups(n) the smallest
prime >= sqrt(n); n is semidivisible when exactly one of lps(n), ups(n)
divides n.
"""
import math


def prime_sieve(n: int) -> list:
    """Return all primes below n using an odd-only sieve of Eratosthenes."""
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(limit: int = 9999_6666_3333) -> int:
    """Sum all semidivisible numbers that do not exceed `limit`."""
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue

            if current > limit:
                break

            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2

            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum


if __name__ == "__main__":
    print(solution())
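# Quick sanity check for the sieve (illustrative):
#
#     >>> prime_sieve(20)
#     [2, 3, 5, 7, 11, 13, 17, 19]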