"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
if is_vision_available():
import PIL
class __magic_name__ ( UpperCamelCase_ ):
_SCREAMING_SNAKE_CASE : Optional[Any] = ["""pixel_values"""]
def __init__( self : Any , snake_case_ : Any = True , snake_case_ : List[Any] = None , snake_case_ : Tuple = PILImageResampling.BICUBIC , snake_case_ : str = True , snake_case_ : Any = None , snake_case_ : List[str] = True , snake_case_ : List[Any] = 1 / 255 , snake_case_ : Union[str, Any] = True , snake_case_ : List[Any] = None , snake_case_ : str = None , snake_case_ : Dict = True , **snake_case_ : Tuple , ):
super().__init__(**_a )
__snake_case = size if size is not None else {"""shortest_edge""": 224}
__snake_case = get_size_dict(_a , default_to_square=_a )
__snake_case = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
__snake_case = get_size_dict(_a , default_to_square=_a , param_name="crop_size" )
__snake_case = do_resize
__snake_case = size
__snake_case = resample
__snake_case = do_center_crop
__snake_case = crop_size
__snake_case = do_rescale
__snake_case = rescale_factor
__snake_case = do_normalize
__snake_case = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
__snake_case = image_std if image_std is not None else OPENAI_CLIP_STD
__snake_case = do_convert_rgb
def lowerCAmelCase ( self : Optional[Any] , snake_case_ : Tuple , snake_case_ : Dict , snake_case_ : Optional[int] = PILImageResampling.BICUBIC , snake_case_ : int = None , **snake_case_ : Union[str, Any] , ):
__snake_case = get_size_dict(_a , default_to_square=_a )
if "shortest_edge" not in size:
raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
__snake_case = get_resize_output_image_size(_a , size=size["shortest_edge"] , default_to_square=_a )
return resize(_a , size=_a , resample=_a , data_format=_a , **_a )
def lowerCAmelCase ( self : Optional[Any] , snake_case_ : str , snake_case_ : Optional[int] , snake_case_ : int = None , **snake_case_ : Optional[int] , ):
__snake_case = get_size_dict(_a )
if "height" not in size or "width" not in size:
raise ValueError(F'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(_a , size=(size["height"], size["width"]) , data_format=_a , **_a )
def lowerCAmelCase ( self : Union[str, Any] , snake_case_ : Tuple , snake_case_ : Union[str, Any] , snake_case_ : int = None , **snake_case_ : Union[str, Any] , ):
return rescale(_a , scale=_a , data_format=_a , **_a )
def lowerCAmelCase ( self : List[str] , snake_case_ : List[str] , snake_case_ : Tuple , snake_case_ : str , snake_case_ : Optional[int] = None , **snake_case_ : List[Any] , ):
return normalize(_a , mean=_a , std=_a , data_format=_a , **_a )
def lowerCAmelCase ( self : int , snake_case_ : Union[str, Any] , snake_case_ : int = None , snake_case_ : str = None , snake_case_ : Union[str, Any] = None , snake_case_ : int = None , snake_case_ : Tuple = None , snake_case_ : str = None , snake_case_ : Dict = None , snake_case_ : List[Any] = None , snake_case_ : List[str] = None , snake_case_ : int = None , snake_case_ : Union[str, Any] = None , snake_case_ : List[str] = None , snake_case_ : Any = ChannelDimension.FIRST , **snake_case_ : Union[str, Any] , ):
__snake_case = do_resize if do_resize is not None else self.do_resize
__snake_case = size if size is not None else self.size
__snake_case = get_size_dict(_a , param_name="size" , default_to_square=_a )
__snake_case = resample if resample is not None else self.resample
__snake_case = do_center_crop if do_center_crop is not None else self.do_center_crop
__snake_case = crop_size if crop_size is not None else self.crop_size
__snake_case = get_size_dict(_a , param_name="crop_size" , default_to_square=_a )
__snake_case = do_rescale if do_rescale is not None else self.do_rescale
__snake_case = rescale_factor if rescale_factor is not None else self.rescale_factor
__snake_case = do_normalize if do_normalize is not None else self.do_normalize
__snake_case = image_mean if image_mean is not None else self.image_mean
__snake_case = image_std if image_std is not None else self.image_std
__snake_case = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
__snake_case = make_list_of_images(_a )
if not valid_images(_a ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
__snake_case = [convert_to_rgb(_a ) for image in images]
# All transformations expect numpy arrays.
__snake_case = [to_numpy_array(_a ) for image in images]
if do_resize:
__snake_case = [self.resize(image=_a , size=_a , resample=_a ) for image in images]
if do_center_crop:
__snake_case = [self.center_crop(image=_a , size=_a ) for image in images]
if do_rescale:
__snake_case = [self.rescale(image=_a , scale=_a ) for image in images]
if do_normalize:
__snake_case = [self.normalize(image=_a , mean=_a , std=_a ) for image in images]
__snake_case = [to_channel_dimension_format(_a , _a ) for image in images]
__snake_case = {"""pixel_values""": images}
return BatchFeature(data=_a , tensor_type=_a )
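# A minimal usage sketch (the input file name is hypothetical, and the class name
# CLIPImageProcessor is reconstructed from the OPENAI_CLIP_* constants imported above):
#
#     from PIL import Image
#     processor = CLIPImageProcessor()
#     batch = processor.preprocess(images=Image.open("cat.png"), return_tensors="np")
#     batch["pixel_values"].shape  # (1, 3, 224, 224) after resize + center crop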
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.weight', f'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.bias', f'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'encoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'encoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'encoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm1.weight', f'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight',
f'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias',
f'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'decoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'decoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'decoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm1.weight', f'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.weight', f'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.bias', f'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'decoder.layers.{i}.final_layer_norm.bias'))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.encoder.norm.weight", "encoder.layernorm.weight"),
("transformer.encoder.norm.bias", "encoder.layernorm.bias"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict):
    prefix = ""

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]

    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
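# Illustration of the split above (a sketch, not part of the conversion script):
# PyTorch's nn.MultiheadAttention stores one fused (3 * hidden, hidden) `in_proj_weight`;
# with hidden size 256, the query, key and value projections are the row blocks
# [0:256], [256:512] and [512:768] (equivalently [-256:]):
#
#     in_proj_weight = torch.randn(768, 256)
#     q_w, k_w, v_w = in_proj_weight[:256], in_proj_weight[256:512], in_proj_weight[-256:]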
def resize(image, checkpoint_url):
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))
    return resized_image


def normalize(image):
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")

    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)

    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)

    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val

    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18",
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        ce_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.4,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
    )

    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: "table", 1: "table rotated"}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    image_processor = DetrImageProcessor(
        format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1000
    )
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion
    filename = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=filename)
    image = Image.open(file_path).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)

    outputs = model(pixel_values)

    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]]
        )
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]]
        )
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])

    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
type=str,
choices=[
"https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
"https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth",
],
help="URL of the Table Transformer checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
"""
Count the set bits of a non-negative integer, comparing the modulo-operator
approach against Brian Kernighan's algorithm with a timeit benchmark.
"""
from timeit import timeit


def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    """Count set bits by repeatedly clearing the lowest set bit."""
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result


def get_set_bits_count_using_modulo_operator(number: int) -> int:
    """Count set bits by checking the last bit and shifting right."""
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result


def benchmark() -> None:
    """Benchmark both implementations on a few sample inputs."""

    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(f"Benchmark when {number = }:")
        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        # time the call for the current `number` (the original hardcoded 25 here)
        timing = timeit(f"z.get_set_bits_count_using_modulo_operator({number})", setup=setup)
        print(f"timeit() runs in {timing} seconds")
        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            f"z.get_set_bits_count_using_brian_kernighans_algorithm({number})",
            setup=setup,
        )
        print(f"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class TFDPRModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        projection_dim=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.projection_dim = projection_dim

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        config = DPRConfig(projection_dim=self.projection_dim, **config.to_dict())

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_dpr_context_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRContextEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_question_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRQuestionEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_reader(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRReader(config=config)
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.relevance_logits.shape, (self.batch_size,))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids}
        return config, inputs_dict
@require_tf
class TFDPRModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}

    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDPRModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPRConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_dpr_context_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs)

    def test_dpr_question_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs)

    def test_dpr_reader_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRQuestionEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRReader.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDPRModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")

        input_ids = tf.constant(
            [[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]]
        )  # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids)[0]  # embedding shape = (1, 768)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    0.03236253,
                    0.12753335,
                    0.16818509,
                    0.00279786,
                    0.3896933,
                    0.24264945,
                    0.2178971,
                    -0.02335227,
                    -0.08481959,
                    -0.14324117,
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :10].numpy(), expected_slice.numpy(), atol=1e-4))
import os
import sys
import unittest


git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import get_test_info  # noqa: E402
from get_test_info import (  # noqa: E402
    get_model_to_test_mapping,
    get_model_to_tester_mapping,
    get_test_to_tester_mapping,
)


BERT_TEST_FILE = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
BLIP_TEST_FILE = os.path.join("tests", "models", "blip", "test_modeling_blip.py")


class GetTestInfoTester(unittest.TestCase):
    def test_get_test_to_tester_mapping(self):
        bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE)
        blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {"BertModelTest": "BertModelTester"}

        EXPECTED_BLIP_MAPPING = {
            "BlipModelTest": "BlipModelTester",
            "BlipTextImageModelTest": "BlipTextImageModelsModelTester",
            "BlipTextModelTest": "BlipTextModelTester",
            "BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester",
            "BlipVQAModelTest": "BlipVQAModelTester",
            "BlipVisionModelTest": "BlipVisionModelTester",
        }

        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_test_mapping(self):
        bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE)
        blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
            "BertForMaskedLM": ["BertModelTest"],
            "BertForMultipleChoice": ["BertModelTest"],
            "BertForNextSentencePrediction": ["BertModelTest"],
            "BertForPreTraining": ["BertModelTest"],
            "BertForQuestionAnswering": ["BertModelTest"],
            "BertForSequenceClassification": ["BertModelTest"],
            "BertForTokenClassification": ["BertModelTest"],
            "BertLMHeadModel": ["BertModelTest"],
            "BertModel": ["BertModelTest"],
        }

        EXPECTED_BLIP_MAPPING = {
            "BlipForConditionalGeneration": ["BlipTextImageModelTest"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"],
            "BlipForQuestionAnswering": ["BlipVQAModelTest"],
            "BlipModel": ["BlipModelTest"],
            "BlipTextModel": ["BlipTextModelTest"],
            "BlipVisionModel": ["BlipVisionModelTest"],
        }

        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_tester_mapping(self):
        bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE)
        blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
            "BertForMaskedLM": ["BertModelTester"],
            "BertForMultipleChoice": ["BertModelTester"],
            "BertForNextSentencePrediction": ["BertModelTester"],
            "BertForPreTraining": ["BertModelTester"],
            "BertForQuestionAnswering": ["BertModelTester"],
            "BertForSequenceClassification": ["BertModelTester"],
            "BertForTokenClassification": ["BertModelTester"],
            "BertLMHeadModel": ["BertModelTester"],
            "BertModel": ["BertModelTester"],
        }

        EXPECTED_BLIP_MAPPING = {
            "BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"],
            "BlipForQuestionAnswering": ["BlipVQAModelTester"],
            "BlipModel": ["BlipModelTester"],
            "BlipTextModel": ["BlipTextModelTester"],
            "BlipVisionModel": ["BlipVisionModelTester"],
        }

        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), EXPECTED_BLIP_MAPPING)
"""simple docstring"""
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class DDIMSchedulerOutput(BaseOutput):
    """Output class for the scheduler's `step` function."""

    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
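# For reference, the "cosine" transform above corresponds to
#     alpha_bar(t) = cos^2(((t + 0.008) / 1.008) * pi / 2)
# and each beta is clipped so the cumulative product of alphas never collapses:
#     beta_i = min(1 - alpha_bar((i + 1) / T) / alpha_bar(i / T), max_beta)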
class DDIMInverseScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        clip_sample: bool = True,
        set_alpha_to_zero: bool = True,
        steps_offset: int = 0,
        prediction_type: str = "epsilon",
        clip_sample_range: float = 1.0,
        **kwargs,
    ) -> None:
        if kwargs.get("set_alpha_to_one", None) is not None:
            deprecation_message = (
                "The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead."
            )
            deprecate("set_alpha_to_one", "1.0.0", deprecation_message, standard_warn=False)
            set_alpha_to_zero = kwargs["set_alpha_to_one"]
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # At every step in inverted ddim, we are looking into the next alphas_cumprod
        # For the final step, there is no next alphas_cumprod, and the index is out of bounds
        # `set_alpha_to_zero` decides whether we set this parameter simply to zero
        # in this case, self.step() just output the predicted noise
        # or whether we use the final alpha of the "non-previous" one.
        self.final_alpha_cumprod = torch.tensor(0.0) if set_alpha_to_zero else self.alphas_cumprod[-1]

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps).copy().astype(np.int64))

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None) -> None:
        if num_inference_steps > self.config.num_train_timesteps:
            raise ValueError(
                f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"
                f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
                f" maximal {self.config.num_train_timesteps} timesteps."
            )

        self.num_inference_steps = num_inference_steps
        step_ratio = self.config.num_train_timesteps // self.num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # casting to int to avoid issues when num_inference_step is power of 3
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round().copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
        self.timesteps += self.config.steps_offset

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        eta: float = 0.0,
        use_clipped_model_output: bool = False,
        variance_noise: Optional[torch.FloatTensor] = None,
        return_dict: bool = True,
    ) -> Union[DDIMSchedulerOutput, Tuple]:
        # 1. get the next timestep value (the inverse process runs forward in time)
        prev_timestep = timestep + self.config.num_train_timesteps // self.num_inference_steps

        # 2. compute alphas, betas
        # change original implementation to exactly match noise levels for analogous forward process
        alpha_prod_t = self.alphas_cumprod[timestep]
        alpha_prod_t_prev = (
            self.alphas_cumprod[prev_timestep]
            if prev_timestep < self.config.num_train_timesteps
            else self.final_alpha_cumprod
        )

        beta_prod_t = 1 - alpha_prod_t

        # 3. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
            pred_epsilon = model_output
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
            pred_epsilon = (sample - alpha_prod_t**0.5 * pred_original_sample) / beta_prod_t**0.5
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
            pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
                " `v_prediction`"
            )

        # 4. Clip or threshold "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon

        # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        prev_sample = alpha_prod_t_prev**0.5 * pred_original_sample + pred_sample_direction

        if not return_dict:
            return (prev_sample, pred_original_sample)
        return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)

    def __len__(self) -> int:
        return self.config.num_train_timesteps
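# For reference, formula (12) of https://arxiv.org/pdf/2010.02502.pdf, which `step`
# evaluates (here run forward in time for inversion), combines steps 3, 5 and 6 above:
#     x_prev = sqrt(alpha_prod_prev) * pred_x0 + sqrt(1 - alpha_prod_prev) * pred_epsilon
# where, in the "epsilon" prediction mode,
#     pred_x0 = (x_t - sqrt(1 - alpha_prod_t) * pred_epsilon) / sqrt(alpha_prod_t)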
class CircularQueue:
    """Circular FIFO queue with a fixed capacity, backed by a Python list."""

    def __init__(self, n: int):
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")

        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception("UNDERFLOW")

        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
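# A short usage sketch (not part of the original class):
#
#     queue = CircularQueue(3)
#     queue.enqueue(1).enqueue(2).enqueue(3)  # enqueue returns self, so calls chain
#     len(queue)       # 3
#     queue.dequeue()  # 1 -- front advances modulo the capacity
#     queue.enqueue(4) # reuses the slot freed by the dequeue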
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
a :Union[str, Any] = {
"configuration_speecht5": [
"SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP",
"SpeechT5Config",
"SpeechT5HifiGanConfig",
],
"feature_extraction_speecht5": ["SpeechT5FeatureExtractor"],
"processing_speecht5": ["SpeechT5Processor"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a :str = ["SpeechT5Tokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a :str = [
"SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST",
"SpeechT5ForSpeechToText",
"SpeechT5ForSpeechToSpeech",
"SpeechT5ForTextToSpeech",
"SpeechT5Model",
"SpeechT5PreTrainedModel",
"SpeechT5HifiGan",
]
if TYPE_CHECKING:
from .configuration_speechta import (
SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
SpeechTaConfig,
SpeechTaHifiGanConfig,
)
from .feature_extraction_speechta import SpeechTaFeatureExtractor
from .processing_speechta import SpeechTaProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speechta import SpeechTaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speechta import (
SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaHifiGan,
SpeechTaModel,
SpeechTaPreTrainedModel,
)
else:
import sys
a :Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from __future__ import annotations
def lowerCamelCase ( _snake_case ,_snake_case ):
UpperCAmelCase__ : Optional[Any] = 0
UpperCAmelCase__ : Dict = len(__lowerCAmelCase ) - 1
while i < j:
if nums[i] + nums[j] == target:
return [i, j]
elif nums[i] + nums[j] < target:
UpperCAmelCase__ : Optional[int] = i + 1
else:
UpperCAmelCase__ : str = j - 1
return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'{two_pointer([2, 7, 11, 15], 9) = }')
"""simple docstring"""
import math
import os
import sys
def _lowercase ( __lowerCAmelCase ) -> str:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = """"""
try:
with open(__lowerCAmelCase , """rb""" ) as binary_file:
SCREAMING_SNAKE_CASE__ : Optional[int] = binary_file.read()
for dat in data:
SCREAMING_SNAKE_CASE__ : Dict = F'''{dat:08b}'''
result += curr_byte
return result
except OSError:
print("""File not accessible""" )
sys.exit()
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> None:
lexicon.pop(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = last_match_id
if math.loga(__lowerCAmelCase ).is_integer():
for curr_key in lexicon:
SCREAMING_SNAKE_CASE__ : Dict = """0""" + lexicon[curr_key]
SCREAMING_SNAKE_CASE__ : str = bin(__lowerCAmelCase )[2:]
def _lowercase ( __lowerCAmelCase ) -> str:
SCREAMING_SNAKE_CASE__ : Dict = {"""0""": """0""", """1""": """1"""}
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = """""", """"""
SCREAMING_SNAKE_CASE__ : Any = len(__lowerCAmelCase )
for i in range(len(__lowerCAmelCase ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
SCREAMING_SNAKE_CASE__ : Optional[int] = lexicon[curr_string]
result += last_match_id
add_key_to_lexicon(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
index += 1
SCREAMING_SNAKE_CASE__ : List[str] = """"""
while curr_string != "" and curr_string not in lexicon:
curr_string += "0"
if curr_string != "":
SCREAMING_SNAKE_CASE__ : List[Any] = lexicon[curr_string]
result += last_match_id
return result
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase ) -> str:
SCREAMING_SNAKE_CASE__ : Any = os.path.getsize(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = bin(__lowerCAmelCase )[2:]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = len(__lowerCAmelCase )
return "0" * (length_length - 1) + file_length_binary + compressed
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase ) -> None:
SCREAMING_SNAKE_CASE__ : Optional[int] = 8
try:
with open(__lowerCAmelCase , """wb""" ) as opened_file:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [
to_write[i : i + byte_length]
for i in range(0 , len(__lowerCAmelCase ) , __lowerCAmelCase )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append("""10000000""" )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array:
opened_file.write(int(__lowerCAmelCase , 2 ).to_bytes(1 , byteorder="""big""" ) )
except OSError:
print("""File not accessible""" )
sys.exit()
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase ) -> None:
SCREAMING_SNAKE_CASE__ : Dict = read_file_binary(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[int] = compress_data(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[int] = add_file_length(__lowerCAmelCase , __lowerCAmelCase )
write_file_binary(__lowerCAmelCase , __lowerCAmelCase )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
"""simple docstring"""
import functools
from typing import Any
def a__ ( SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or len(__lowerCAmelCase ) == 0:
raise ValueError("the string should be not empty string" )
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or not all(
isinstance(__lowerCAmelCase , __lowerCAmelCase ) and len(__lowerCAmelCase ) > 0 for item in words ):
raise ValueError("the words should be a list of non-empty strings" )
# Build trie
lowerCAmelCase : dict[str, Any] = {}
lowerCAmelCase : Optional[Any] = """WORD_KEEPER"""
for word in words:
lowerCAmelCase : Tuple = trie
for c in word:
if c not in trie_node:
lowerCAmelCase : str = {}
lowerCAmelCase : Optional[int] = trie_node[c]
lowerCAmelCase : Dict = True
lowerCAmelCase : List[Any] = len(__lowerCAmelCase )
# Dynamic programming method
@functools.cache
def is_breakable(SCREAMING_SNAKE_CASE : Dict ) -> bool:
if index == len_string:
return True
lowerCAmelCase : str = trie
for i in range(__lowerCAmelCase , __lowerCAmelCase ):
lowerCAmelCase : Optional[int] = trie_node.get(string[i] , __lowerCAmelCase )
if trie_node is None:
return False
if trie_node.get(__lowerCAmelCase , __lowerCAmelCase ) and is_breakable(i + 1 ):
return True
return False
return is_breakable(0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
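# Example calls (a sketch; the inputs are illustrative):
#
#     word_break("applepenapple", ["apple", "pen"])  # True: "apple pen apple"
#     word_break("catsandog", ["cats", "dog", "sand", "and", "cat"])  # False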
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class __a (unittest.TestCase):
'''simple docstring'''
def _a ( self ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE__ : Tuple = SamImageProcessor()
SCREAMING_SNAKE_CASE__ : List[str] = SamProcessor(_a )
processor.save_pretrained(self.tmpdirname )
def _a ( self , **_a ) -> Union[str, Any]:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **_a ).image_processor
def _a ( self ) -> Tuple:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def _a ( self ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
SCREAMING_SNAKE_CASE__ : Tuple = [Image.fromarray(np.moveaxis(_a , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _a ( self ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ : Dict = self.get_image_processor(do_normalize=_a , padding_value=1.0 )
SCREAMING_SNAKE_CASE__ : Optional[int] = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=_a , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
def _a ( self ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = self.get_image_processor()
SCREAMING_SNAKE_CASE__ : Any = SamProcessor(image_processor=_a )
SCREAMING_SNAKE_CASE__ : List[str] = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ : Optional[Any] = image_processor(_a , return_tensors="""np""" )
SCREAMING_SNAKE_CASE__ : Dict = processor(images=_a , return_tensors="""np""" )
input_feat_extract.pop("""original_sizes""" ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop("""reshaped_input_sizes""" ) # pop original_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
@require_torch
def _a ( self ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = self.get_image_processor()
SCREAMING_SNAKE_CASE__ : Any = SamProcessor(image_processor=_a )
SCREAMING_SNAKE_CASE__ : Optional[int] = [torch.ones((1, 3, 5, 5) )]
SCREAMING_SNAKE_CASE__ : str = [[1_764, 2_646]]
SCREAMING_SNAKE_CASE__ : List[Any] = [[683, 1_024]]
SCREAMING_SNAKE_CASE__ : Any = processor.post_process_masks(_a , _a , _a )
self.assertEqual(masks[0].shape , (1, 3, 1_764, 2_646) )
SCREAMING_SNAKE_CASE__ : Dict = processor.post_process_masks(
_a , torch.tensor(_a ) , torch.tensor(_a ) )
self.assertEqual(masks[0].shape , (1, 3, 1_764, 2_646) )
# should also work with np
SCREAMING_SNAKE_CASE__ : Dict = [np.ones((1, 3, 5, 5) )]
SCREAMING_SNAKE_CASE__ : Tuple = processor.post_process_masks(_a , np.array(_a ) , np.array(_a ) )
self.assertEqual(masks[0].shape , (1, 3, 1_764, 2_646) )
SCREAMING_SNAKE_CASE__ : Dict = [[1, 0], [0, 1]]
with self.assertRaises(_a ):
SCREAMING_SNAKE_CASE__ : Tuple = processor.post_process_masks(_a , np.array(_a ) , np.array(_a ) )
@require_vision
@require_tf
class __a (unittest.TestCase):
'''simple docstring'''
def _a ( self ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE__ : Optional[int] = SamImageProcessor()
SCREAMING_SNAKE_CASE__ : Dict = SamProcessor(_a )
processor.save_pretrained(self.tmpdirname )
def _a ( self , **_a ) -> List[str]:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **_a ).image_processor
def _a ( self ) -> int:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def _a ( self ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
SCREAMING_SNAKE_CASE__ : Any = [Image.fromarray(np.moveaxis(_a , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _a ( self ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ : int = self.get_image_processor(do_normalize=_a , padding_value=1.0 )
SCREAMING_SNAKE_CASE__ : Tuple = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=_a , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
def _a ( self ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = self.get_image_processor()
SCREAMING_SNAKE_CASE__ : List[Any] = SamProcessor(image_processor=_a )
SCREAMING_SNAKE_CASE__ : Optional[int] = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ : Any = image_processor(_a , return_tensors="""np""" )
SCREAMING_SNAKE_CASE__ : Any = processor(images=_a , return_tensors="""np""" )
input_feat_extract.pop("""original_sizes""" ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop("""reshaped_input_sizes""" ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
@require_tf
def _a ( self ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = self.get_image_processor()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = SamProcessor(image_processor=_a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = [tf.ones((1, 3, 5, 5) )]
SCREAMING_SNAKE_CASE__ : Optional[int] = [[1_764, 2_646]]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [[683, 1_024]]
SCREAMING_SNAKE_CASE__ : Optional[Any] = processor.post_process_masks(_a , _a , _a , return_tensors="""tf""" )
self.assertEqual(masks[0].shape , (1, 3, 1_764, 2_646) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = processor.post_process_masks(
_a , tf.convert_to_tensor(_a ) , tf.convert_to_tensor(_a ) , return_tensors="""tf""" , )
self.assertEqual(masks[0].shape , (1, 3, 1_764, 2_646) )
# should also work with np
SCREAMING_SNAKE_CASE__ : Optional[int] = [np.ones((1, 3, 5, 5) )]
SCREAMING_SNAKE_CASE__ : Optional[Any] = processor.post_process_masks(
_a , np.array(_a ) , np.array(_a ) , return_tensors="""tf""" )
self.assertEqual(masks[0].shape , (1, 3, 1_764, 2_646) )
SCREAMING_SNAKE_CASE__ : Any = [[1, 0], [0, 1]]
with self.assertRaises(tf.errors.InvalidArgumentError ):
SCREAMING_SNAKE_CASE__ : str = processor.post_process_masks(
_a , np.array(_a ) , np.array(_a ) , return_tensors="""tf""" )
@require_vision
@require_torchvision
class __a (unittest.TestCase):
'''simple docstring'''
def _a ( self ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE__ : Dict = SamImageProcessor()
SCREAMING_SNAKE_CASE__ : Dict = SamProcessor(_a )
processor.save_pretrained(self.tmpdirname )
def _a ( self , **_a ) -> Any:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **_a ).image_processor
def _a ( self ) -> Union[str, Any]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def _a ( self ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [Image.fromarray(np.moveaxis(_a , 0 , -1 ) ) for x in image_inputs]
return image_inputs
@is_pt_tf_cross_test
def test_post_process_masks_equivalence(self):
    image_processor = self.get_image_processor()
    processor = SamProcessor(image_processor=image_processor)
    dummy_masks = np.random.randint(0, 2, size=(1, 3, 5, 5)).astype(np.float32)
    tf_dummy_masks = [tf.convert_to_tensor(dummy_masks)]
    pt_dummy_masks = [torch.tensor(dummy_masks)]
    original_sizes = [[1764, 2646]]
    reshaped_input_size = [[683, 1024]]
    tf_masks = processor.post_process_masks(
        tf_dummy_masks, original_sizes, reshaped_input_size, return_tensors="tf"
    )
    pt_masks = processor.post_process_masks(
        pt_dummy_masks, original_sizes, reshaped_input_size, return_tensors="pt"
    )
    self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy()))
@is_pt_tf_cross_test
def test_image_processor_equivalence(self):
    image_processor = self.get_image_processor()
    processor = SamProcessor(image_processor=image_processor)
    image_input = self.prepare_image_inputs()
    image_processor_pt = image_processor(image_input, return_tensors="pt")["pixel_values"].numpy()
    processor_pt = processor(images=image_input, return_tensors="pt")["pixel_values"].numpy()
    image_processor_tf = image_processor(image_input, return_tensors="tf")["pixel_values"].numpy()
    processor_tf = processor(images=image_input, return_tensors="tf")["pixel_values"].numpy()
    self.assertTrue(np.allclose(image_processor_pt, image_processor_tf))
    self.assertTrue(np.allclose(processor_pt, processor_tf))
    self.assertTrue(np.allclose(image_processor_pt, processor_pt))
| 680 | 0 |
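# --- Added illustration (not part of the dataset row above). The SamProcessor tests feed
# (1, 3, 5, 5) low-resolution masks and expect (1, 3, 1764, 2646) outputs. A minimal sketch
# of that upscaling step, assuming plain bilinear interpolation stands in for
# processor.post_process_masks (the real method also un-pads and resizes in two stages):
import torch
import torch.nn.functional as F

low_res_masks = torch.ones(1, 3, 5, 5)
original_size = (1764, 2646)
upscaled = F.interpolate(low_res_masks, size=original_size, mode="bilinear", align_corners=False)
assert upscaled.shape == (1, 3, 1764, 2646)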
from statistics import mean
import numpy as np
def calculate_turn_around_time(
    process_name: list, arrival_time: list, burst_time: list, no_of_process: int
) -> list:
    """Calculate the turn around times using Highest Response Ratio Next scheduling."""
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Displays the finished process.
    # If it is 0, the performance is completed if it is 1, before the performance.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process

    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()

    while no_of_process > finished_process_count:
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]

        response_ratio = 0
        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        temp = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[i]
                if response_ratio < temp:
                    response_ratio = temp
                    loc = i

        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1

    return turn_around_time


def calculate_waiting_time(
    process_name: list, turn_around_time: list, burst_time: list, no_of_process: int
) -> list:
    """Calculate the waiting time of each process: waiting = turnaround - burst."""
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time


if __name__ == "__main__":
    no_of_process = 5
    process_name = ["A", "B", "C", "D", "E"]
    arrival_time = [1, 2, 3, 4, 5]
    burst_time = [1, 2, 3, 4, 5]
    turn_around_time = calculate_turn_around_time(
        process_name, arrival_time, burst_time, no_of_process
    )
    waiting_time = calculate_waiting_time(
        process_name, turn_around_time, burst_time, no_of_process
    )
    print("Process name \tArrival time \tBurst time \tTurn around time \tWaiting time")
    for i in range(0, no_of_process):
        print(
            f"{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t"
            f"{turn_around_time[i]}\t\t\t{waiting_time[i]}"
        )
    print(f"average waiting time : {mean(waiting_time):.5f}")
    print(f"average turn around time : {mean(turn_around_time):.5f}") | 106 |
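# --- Added sanity check for the HRRN scheduler above, using the demo input. Expected values
# were worked out by hand: process A runs first (TAT 1), then B (2), C (4), D (7), E (11);
# waiting time is turnaround minus burst.
tat = calculate_turn_around_time(["A", "B", "C", "D", "E"], [1, 2, 3, 4, 5], [1, 2, 3, 4, 5], 5)
assert tat == [1, 2, 4, 7, 11]
assert calculate_waiting_time(["A", "B", "C", "D", "E"], tat, [1, 2, 3, 4, 5], 5) == [0, 0, 1, 3, 6]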
"""simple docstring"""
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __a (UpperCamelCase_ , unittest.TestCase):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :List[Any] = LayoutLMTokenizer
_SCREAMING_SNAKE_CASE :Optional[int] = LayoutLMTokenizerFast
_SCREAMING_SNAKE_CASE :str = True
_SCREAMING_SNAKE_CASE :Optional[int] = True
def _a ( self ) -> Tuple:
"""simple docstring"""
super().setUp()
SCREAMING_SNAKE_CASE__ : List[str] = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
SCREAMING_SNAKE_CASE__ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def _a ( self , **_a ) -> Optional[int]:
"""simple docstring"""
return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **_a )
def _a ( self , _a ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = """UNwant\u00E9d,running"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = """unwanted, running"""
return input_text, output_text
def _a ( self ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = self.tokenizer_class(self.vocab_file )
SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(_a , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , [7, 4, 5, 10, 8, 9] )
def _a ( self ) -> Optional[int]:
"""simple docstring"""
pass
| 680 | 0 |
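# --- Added illustration: greedy longest-match-first WordPiece, a simplified sketch of the
# matching stage the tokenizer test above exercises (lowercasing and accent stripping happen
# earlier in the real tokenizer; this is not the transformers implementation).
def wordpiece_tokenize(word, vocab, unk="[UNK]"):
    tokens, start = [], 0
    while start < len(word):
        end, piece = len(word), None
        while start < end:
            candidate = ("##" if start > 0 else "") + word[start:end]
            if candidate in vocab:
                piece = candidate
                break
            end -= 1
        if piece is None:
            return [unk]
        tokens.append(piece)
        start = end
    return tokens

vocab = {"un", "##want", "##ed", "runn", "##ing", ","}
assert wordpiece_tokenize("unwanted", vocab) == ["un", "##want", "##ed"]
assert wordpiece_tokenize("running", vocab) == ["runn", "##ing"]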
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
a__ : Union[str, Any] = logging.get_logger(__name__)
a__ : List[Any] = {
"microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}
class lowercase ( UpperCamelCase_ ):
"""simple docstring"""
snake_case_ = """layoutlmv3"""
def __init__( self : str , a_ : Any=5_02_65 , a_ : Union[str, Any]=7_68 , a_ : Optional[Any]=12 , a_ : Dict=12 , a_ : Any=30_72 , a_ : int="gelu" , a_ : str=0.1 , a_ : Dict=0.1 , a_ : Optional[int]=5_12 , a_ : List[str]=2 , a_ : Optional[Any]=0.0_2 , a_ : List[str]=1e-5 , a_ : int=1 , a_ : List[Any]=0 , a_ : Any=2 , a_ : Tuple=10_24 , a_ : Dict=1_28 , a_ : str=1_28 , a_ : Optional[Any]=True , a_ : Any=32 , a_ : Dict=1_28 , a_ : List[str]=64 , a_ : Any=2_56 , a_ : str=True , a_ : Union[str, Any]=True , a_ : Any=True , a_ : List[Any]=2_24 , a_ : str=3 , a_ : int=16 , a_ : int=None , **a_ : Tuple , ):
"""simple docstring"""
super().__init__(
vocab_size=_a , hidden_size=_a , num_hidden_layers=_a , num_attention_heads=_a , intermediate_size=_a , hidden_act=_a , hidden_dropout_prob=_a , attention_probs_dropout_prob=_a , max_position_embeddings=_a , type_vocab_size=_a , initializer_range=_a , layer_norm_eps=_a , pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , **_a , )
lowerCamelCase__ = max_ad_position_embeddings
lowerCamelCase__ = coordinate_size
lowerCamelCase__ = shape_size
lowerCamelCase__ = has_relative_attention_bias
lowerCamelCase__ = rel_pos_bins
lowerCamelCase__ = max_rel_pos
lowerCamelCase__ = has_spatial_attention_bias
lowerCamelCase__ = rel_ad_pos_bins
lowerCamelCase__ = max_rel_ad_pos
lowerCamelCase__ = text_embed
lowerCamelCase__ = visual_embed
lowerCamelCase__ = input_size
lowerCamelCase__ = num_channels
lowerCamelCase__ = patch_size
lowerCamelCase__ = classifier_dropout
class lowercase ( UpperCamelCase_ ):
"""simple docstring"""
snake_case_ = version.parse('1.12' )
@property
def _UpperCamelCase ( self : Tuple ):
"""simple docstring"""
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
else:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels"""}),
] )
@property
def _UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
return 1e-5
@property
def _UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
return 12
def _UpperCamelCase ( self : Tuple , a_ : Optional[int] , a_ : Union[str, Any] = -1 , a_ : Optional[Any] = -1 , a_ : Any = False , a_ : Any = None , a_ : Optional[Any] = 3 , a_ : Tuple = 40 , a_ : Any = 40 , ):
"""simple docstring"""
setattr(processor.image_processor , """apply_ocr""" , _a )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowerCamelCase__ = compute_effective_axis_dimension(
_a , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
lowerCamelCase__ = processor.tokenizer.num_special_tokens_to_add(_a )
lowerCamelCase__ = compute_effective_axis_dimension(
_a , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_a )
# Generate dummy inputs according to compute batch and sequence
lowerCamelCase__ = [[""" """.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
lowerCamelCase__ = [[[48, 84, 73, 1_28]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
lowerCamelCase__ = self._generate_dummy_images(_a , _a , _a , _a )
lowerCamelCase__ = dict(
processor(
_a , text=_a , boxes=_a , return_tensors=_a , ) )
return inputs
| 165 |
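# --- Hedged sketch of the dynamic-axis trick used in generate_dummy_inputs above: when an
# axis is dynamic (-1), the export forwards a small fixed size so ONNX cannot specialize the
# graph. This mirrors (but is not) transformers' compute_effective_axis_dimension.
def effective_axis(dimension: int, fixed_dimension: int, num_token_to_add: int = 0) -> int:
    if dimension <= 0:
        dimension = fixed_dimension
    return dimension - num_token_to_add

assert effective_axis(-1, fixed_dimension=2) == 2                      # dynamic batch -> 2 samples
assert effective_axis(-1, fixed_dimension=8, num_token_to_add=2) == 6  # dynamic seq minus special tokens
assert effective_axis(16, fixed_dimension=2) == 16                     # static axis passes through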
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
a :str = 16
a :Union[str, Any] = 32
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase = 16 ) -> Tuple:
SCREAMING_SNAKE_CASE__ : int = AutoTokenizer.from_pretrained("""bert-base-cased""" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(__lowerCAmelCase ):
# max_length=None => use the model max length (it's actually the default)
SCREAMING_SNAKE_CASE__ : List[str] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=__lowerCAmelCase , max_length=__lowerCAmelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
SCREAMING_SNAKE_CASE__ : List[str] = datasets.map(
__lowerCAmelCase , batched=__lowerCAmelCase , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
SCREAMING_SNAKE_CASE__ : Any = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(__lowerCAmelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
SCREAMING_SNAKE_CASE__ : int = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
SCREAMING_SNAKE_CASE__ : str = 16
elif accelerator.mixed_precision != "no":
SCREAMING_SNAKE_CASE__ : Dict = 8
else:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = None
return tokenizer.pad(
__lowerCAmelCase , padding="""longest""" , max_length=__lowerCAmelCase , pad_to_multiple_of=__lowerCAmelCase , return_tensors="""pt""" , )
# Instantiate dataloaders.
SCREAMING_SNAKE_CASE__ : int = DataLoader(
tokenized_datasets["""train"""] , shuffle=__lowerCAmelCase , collate_fn=__lowerCAmelCase , batch_size=__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = DataLoader(
tokenized_datasets["""validation"""] , shuffle=__lowerCAmelCase , collate_fn=__lowerCAmelCase , batch_size=__lowerCAmelCase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
a :Dict = mocked_dataloaders # noqa: F811
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase ) -> Union[str, Any]:
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , __lowerCAmelCase ) == "1":
SCREAMING_SNAKE_CASE__ : Optional[int] = 2
# New Code #
SCREAMING_SNAKE_CASE__ : Optional[int] = int(args.gradient_accumulation_steps )
# Initialize accelerator
SCREAMING_SNAKE_CASE__ : Optional[Any] = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=__lowerCAmelCase )
if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
raise NotImplementedError(
"""Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`""" )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
SCREAMING_SNAKE_CASE__ : Any = config["""lr"""]
SCREAMING_SNAKE_CASE__ : str = int(config["""num_epochs"""] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = int(config["""seed"""] )
SCREAMING_SNAKE_CASE__ : List[str] = int(config["""batch_size"""] )
SCREAMING_SNAKE_CASE__ : Any = evaluate.load("""glue""" , """mrpc""" )
set_seed(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = get_dataloaders(__lowerCAmelCase , __lowerCAmelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
SCREAMING_SNAKE_CASE__ : int = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=__lowerCAmelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
SCREAMING_SNAKE_CASE__ : int = model.to(accelerator.device )
# Instantiate optimizer
SCREAMING_SNAKE_CASE__ : Union[str, Any] = AdamW(params=model.parameters() , lr=__lowerCAmelCase )
# Instantiate scheduler
SCREAMING_SNAKE_CASE__ : Any = get_linear_schedule_with_warmup(
optimizer=__lowerCAmelCase , num_warmup_steps=100 , num_training_steps=(len(__lowerCAmelCase ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = accelerator.prepare(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# Now we train the model
for epoch in range(__lowerCAmelCase ):
model.train()
for step, batch in enumerate(__lowerCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(__lowerCAmelCase ):
SCREAMING_SNAKE_CASE__ : str = model(**__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Dict = output.loss
accelerator.backward(__lowerCAmelCase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(__lowerCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Any = model(**__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = outputs.logits.argmax(dim=-1 )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=__lowerCAmelCase , references=__lowerCAmelCase , )
SCREAMING_SNAKE_CASE__ : List[Any] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , __lowerCAmelCase )
def _lowercase ( ) -> Any:
SCREAMING_SNAKE_CASE__ : str = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=__lowerCAmelCase , default=__lowerCAmelCase , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
# New Code #
parser.add_argument(
"""--gradient_accumulation_steps""" , type=__lowerCAmelCase , default=1 , help="""The number of minibatches to be ran before gradients are accumulated.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = parser.parse_args()
SCREAMING_SNAKE_CASE__ : int = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(__lowerCAmelCase , __lowerCAmelCase )
if __name__ == "__main__":
main()
| 680 | 0 |
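# --- Hedged sketch of what accelerator.accumulate(model) manages above, in plain PyTorch:
# scale each micro-batch loss by 1/N and only step the optimizer every N batches
# (gradient-sync / no_sync handling across processes is omitted).
import torch

model = torch.nn.Linear(4, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
accumulation_steps = 4

for step in range(16):
    batch = torch.randn(8, 4)
    loss = model(batch).pow(2).mean() / accumulation_steps  # average grads over N micro-batches
    loss.backward()                                         # gradients accumulate in .grad
    if (step + 1) % accumulation_steps == 0:
        optimizer.step()
        optimizer.zero_grad()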
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
_snake_case = logging.get_logger(__name__)
@add_end_docstrings(UpperCamelCase_ )
class lowerCAmelCase_ ( UpperCamelCase_ ):
"""simple docstring"""
def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> str:
super().__init__(*_a , **_a )
requires_backends(self , 'vision' )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == 'tf' else MODEL_FOR_VISION_2_SEQ_MAPPING )
def __lowercase( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> Union[str, Any]:
__UpperCamelCase = {}
__UpperCamelCase = {}
if prompt is not None:
__UpperCamelCase = prompt
if generate_kwargs is not None:
__UpperCamelCase = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
__UpperCamelCase = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
'\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,'
' please use only one' )
__UpperCamelCase = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
return super().__call__(_a , **_a )
def __lowercase( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> Tuple:
__UpperCamelCase = load_image(_a )
if prompt is not None:
if not isinstance(_a , _a ):
raise ValueError(
f"""Received an invalid text input, got - {type(_a )} - but expected a single string. """
'Note also that one single text can be provided for conditional image to text generation.' )
__UpperCamelCase = self.model.config.model_type
if model_type == "git":
__UpperCamelCase = self.image_processor(images=_a , return_tensors=self.framework )
__UpperCamelCase = self.tokenizer(text=_a , add_special_tokens=_a ).input_ids
__UpperCamelCase = [self.tokenizer.cls_token_id] + input_ids
__UpperCamelCase = torch.tensor(_a ).unsqueeze(0 )
model_inputs.update({'input_ids': input_ids} )
elif model_type == "pix2struct":
__UpperCamelCase = self.image_processor(images=_a , header_text=_a , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
__UpperCamelCase = self.image_processor(images=_a , return_tensors=self.framework )
__UpperCamelCase = self.tokenizer(_a , return_tensors=self.framework )
model_inputs.update(_a )
else:
raise ValueError(f"""Model type {model_type} does not support conditional text generation""" )
else:
__UpperCamelCase = self.image_processor(images=_a , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
__UpperCamelCase = None
return model_inputs
def __lowercase( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> Tuple:
if (
"input_ids" in model_inputs
and isinstance(model_inputs['input_ids'] , _a )
and all(x is None for x in model_inputs['input_ids'] )
):
__UpperCamelCase = None
if generate_kwargs is None:
__UpperCamelCase = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
__UpperCamelCase = model_inputs.pop(self.model.main_input_name )
__UpperCamelCase = self.model.generate(_a , **_a , **_a )
return model_outputs
def postprocess(self, model_outputs):
    records = []
    for output_ids in model_outputs:
        record = {
            "generated_text": self.tokenizer.decode(
                output_ids,
                skip_special_tokens=True,
            )
        }
        records.append(record)
    return records
| 383 |
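# --- Hedged usage sketch for the ImageToTextPipeline above; the checkpoint id and image path
# are illustrative placeholders, not taken from the source. Downloads a model on first run.
from transformers import pipeline

captioner = pipeline("image-to-text", model="microsoft/git-base")
print(captioner("photo.jpg"))                         # [{'generated_text': '...'}]
print(captioner("photo.jpg", prompt="a picture of"))  # conditional generation (GIT / Pix2Struct)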
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_import_structure = {
"configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_ernie"] = [
"ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST",
"ErnieForCausalLM",
"ErnieForMaskedLM",
"ErnieForMultipleChoice",
"ErnieForNextSentencePrediction",
"ErnieForPreTraining",
"ErnieForQuestionAnswering",
"ErnieForSequenceClassification",
"ErnieForTokenClassification",
"ErnieModel",
"ErniePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 680 | 0 |
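# --- Hedged sketch of the lazy-import pattern behind _LazyModule: a PEP 562 module-level
# __getattr__ defers the heavy torch import until an attribute is first touched. Simplified;
# the real _LazyModule also walks the _import_structure dict and handles submodules.
import importlib

_LAZY_ATTRS = {"ErnieModel": ".modeling_ernie"}  # attribute -> relative module

def __getattr__(name):
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __name__)  # needs package context
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")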
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A = logging.get_logger(__name__)
A = {
"huggingface/informer-tourism-monthly": (
"https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class SCREAMING_SNAKE_CASE ( UpperCamelCase_ ):
"""simple docstring"""
__A = """informer"""
__A = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
"""num_hidden_layers""": """encoder_layers""",
}
def __init__( self , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = "student_t" , __UpperCamelCase = "nll" , __UpperCamelCase = 1 , __UpperCamelCase = None , __UpperCamelCase = "mean" , __UpperCamelCase = 0 , __UpperCamelCase = 0 , __UpperCamelCase = 0 , __UpperCamelCase = 0 , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = 64 , __UpperCamelCase = 32 , __UpperCamelCase = 32 , __UpperCamelCase = 2 , __UpperCamelCase = 2 , __UpperCamelCase = 2 , __UpperCamelCase = 2 , __UpperCamelCase = True , __UpperCamelCase = "gelu" , __UpperCamelCase = 0.05 , __UpperCamelCase = 0.1 , __UpperCamelCase = 0.1 , __UpperCamelCase = 0.1 , __UpperCamelCase = 0.1 , __UpperCamelCase = 1_00 , __UpperCamelCase = 0.02 , __UpperCamelCase=True , __UpperCamelCase = "prob" , __UpperCamelCase = 5 , __UpperCamelCase = True , **__UpperCamelCase , ):
"""simple docstring"""
snake_case_ = prediction_length
snake_case_ = context_length or prediction_length
snake_case_ = distribution_output
snake_case_ = loss
snake_case_ = input_size
snake_case_ = num_time_features
snake_case_ = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
snake_case_ = scaling
snake_case_ = num_dynamic_real_features
snake_case_ = num_static_real_features
snake_case_ = num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(_a ) != num_static_categorical_features:
raise ValueError(
'The cardinality should be a list of the same length as `num_static_categorical_features`' )
snake_case_ = cardinality
else:
snake_case_ = [0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(_a ) != num_static_categorical_features:
raise ValueError(
'The embedding dimension should be a list of the same length as `num_static_categorical_features`' )
snake_case_ = embedding_dimension
else:
snake_case_ = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
snake_case_ = num_parallel_samples
# Transformer architecture configuration
snake_case_ = input_size * len(self.lags_sequence ) + self._number_of_features
snake_case_ = d_model
snake_case_ = encoder_attention_heads
snake_case_ = decoder_attention_heads
snake_case_ = encoder_ffn_dim
snake_case_ = decoder_ffn_dim
snake_case_ = encoder_layers
snake_case_ = decoder_layers
snake_case_ = dropout
snake_case_ = attention_dropout
snake_case_ = activation_dropout
snake_case_ = encoder_layerdrop
snake_case_ = decoder_layerdrop
snake_case_ = activation_function
snake_case_ = init_std
snake_case_ = use_cache
# Informer
snake_case_ = attention_type
snake_case_ = sampling_factor
snake_case_ = distil
super().__init__(is_encoder_decoder=_a , **_a )
@property
def __lowerCAmelCase ( self ):
"""simple docstring"""
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
| 187 |
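# --- Hedged usage sketch for the config above; parameter names follow the assignments in
# __init__, and the values are illustrative only. Requires a transformers version with
# Informer support.
from transformers import InformerConfig

config = InformerConfig(
    prediction_length=24,
    context_length=48,
    num_time_features=2,
    lags_sequence=[1, 2, 3],
    num_static_categorical_features=1,
    cardinality=[366],
)
# feature_size = input_size * len(lags_sequence) + _number_of_features (see the property above)
print(config.feature_size, config.d_model)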
"""simple docstring"""
def _lowercase ( __lowerCAmelCase ) -> int:
assert (
isinstance(__lowerCAmelCase , __lowerCAmelCase ) and number_of_steps > 0
), F'''number_of_steps needs to be positive integer, your input {number_of_steps}'''
if number_of_steps == 1:
return 1
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = 1, 1
for _ in range(number_of_steps - 1 ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = current + previous, current
return current
if __name__ == "__main__":
import doctest
doctest.testmod()
| 680 | 0 |
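# --- Quick check for climb_stairs above: the counts follow the Fibonacci recurrence
# f(n) = f(n-1) + f(n-2) with f(1) = 1 and f(2) = 2.
assert [climb_stairs(n) for n in range(1, 7)] == [1, 2, 3, 5, 8, 13]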
"""simple docstring"""
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
__lowerCamelCase = 6_37_81_37.0
__lowerCamelCase = 6_35_67_52.31_42_45
__lowerCamelCase = 6_37_81_37
def a ( __UpperCAmelCase : List[Any] , __UpperCAmelCase : Any , __UpperCAmelCase : Dict , __UpperCAmelCase : List[str] ) -> float:
__magic_name__: Dict = (AXIS_A - AXIS_B) / AXIS_A
# Parametric latitudes
# https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
__magic_name__: Dict = atan((1 - flattening) * tan(radians(__lowerCAmelCase ) ) )
__magic_name__: Dict = atan((1 - flattening) * tan(radians(__lowerCAmelCase ) ) )
# Compute central angle between two points
# using haversine theta. sigma = haversine_distance / equatorial radius
__magic_name__: Tuple = haversine_distance(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) / EQUATORIAL_RADIUS
# Intermediate P and Q values
__magic_name__: List[str] = (b_lata + b_lata) / 2
__magic_name__: Dict = (b_lata - b_lata) / 2
# Intermediate X value
# X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
__magic_name__: Tuple = (sin(__lowerCAmelCase ) ** 2) * (cos(__lowerCAmelCase ) ** 2)
__magic_name__: str = cos(sigma / 2 ) ** 2
__magic_name__: List[str] = (sigma - sin(__lowerCAmelCase )) * (x_numerator / x_demonimator)
# Intermediate Y value
# Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
__magic_name__: int = (cos(__lowerCAmelCase ) ** 2) * (sin(__lowerCAmelCase ) ** 2)
__magic_name__: int = sin(sigma / 2 ) ** 2
__magic_name__: Optional[Any] = (sigma + sin(__lowerCAmelCase )) * (y_numerator / y_denominator)
return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 96 |
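# --- Hedged usage sketch for lamberts_ellipsoidal_distance above. The coordinates are
# illustrative; no reference distance is asserted. Must run inside the package because of the
# relative haversine import.
SAN_FRANCISCO = (37.774856, -122.424227)
YOSEMITE = (37.864742, -119.537521)
print(lamberts_ellipsoidal_distance(*SAN_FRANCISCO, *YOSEMITE))  # metres; close to haversine for short arcs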
"""simple docstring"""
from math import factorial
def _lowercase ( __lowerCAmelCase = 100 ) -> int:
return sum(int(__lowerCAmelCase ) for x in str(factorial(__lowerCAmelCase ) ) )
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
| 680 | 0 |
"""simple docstring"""
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class __magic_name__ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
_SCREAMING_SNAKE_CASE : int = StableUnCLIPPipeline
_SCREAMING_SNAKE_CASE : Tuple = TEXT_TO_IMAGE_PARAMS
_SCREAMING_SNAKE_CASE : int = TEXT_TO_IMAGE_BATCH_PARAMS
_SCREAMING_SNAKE_CASE : Tuple = TEXT_TO_IMAGE_IMAGE_PARAMS
_SCREAMING_SNAKE_CASE : List[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
_SCREAMING_SNAKE_CASE : List[Any] = False
def lowerCAmelCase ( self : List[Any] ):
__snake_case = 32
__snake_case = embedder_hidden_size
# prior components
torch.manual_seed(0 )
__snake_case = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
__snake_case = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_a , projection_dim=_a , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
__snake_case = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=_a , num_layers=1 , )
torch.manual_seed(0 )
__snake_case = DDPMScheduler(
variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=1000 , clip_sample=_a , clip_sample_range=5.0 , beta_schedule="squaredcos_cap_v2" , )
# regular denoising components
torch.manual_seed(0 )
__snake_case = StableUnCLIPImageNormalizer(embedding_dim=_a )
__snake_case = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
torch.manual_seed(0 )
__snake_case = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
__snake_case = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_a , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
__snake_case = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=_a , layers_per_block=1 , upcast_attention=_a , use_linear_projection=_a , )
torch.manual_seed(0 )
__snake_case = DDIMScheduler(
beta_schedule="scaled_linear" , beta_start=0.00085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=_a , steps_offset=1 , )
torch.manual_seed(0 )
__snake_case = AutoencoderKL()
__snake_case = {
# prior components
"""prior_tokenizer""": prior_tokenizer,
"""prior_text_encoder""": prior_text_encoder,
"""prior""": prior,
"""prior_scheduler""": prior_scheduler,
# image noising components
"""image_normalizer""": image_normalizer,
"""image_noising_scheduler""": image_noising_scheduler,
# regular denoising components
"""tokenizer""": tokenizer,
"""text_encoder""": text_encoder,
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
}
return components
def lowerCAmelCase ( self : Optional[int] , snake_case_ : int , snake_case_ : Dict=0 ):
if str(_a ).startswith("mps" ):
__snake_case = torch.manual_seed(_a )
else:
__snake_case = torch.Generator(device=_a ).manual_seed(_a )
__snake_case = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""prior_num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def lowerCAmelCase ( self : Optional[Any] ):
__snake_case = torch_device == """cpu"""
self._test_attention_slicing_forward_pass(test_max_difference=_a )
def lowerCAmelCase ( self : str ):
__snake_case = torch_device in ["""cpu""", """mps"""]
self._test_inference_batch_single_identical(test_max_difference=_a )
@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy"
        )
        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe("anime turtle", generator=generator, output_type="np")
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        _ = pipe(
            "anime turtle",
            prior_num_inference_steps=2,
            num_inference_steps=2,
            output_type="np",
        )
        mem_bytes = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 163 |
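# --- Hedged usage sketch mirroring the slow tests above; the checkpoint id is taken from the
# tests, the prompt and step counts are illustrative. Needs a CUDA GPU and a model download.
import torch
from diffusers import StableUnCLIPPipeline

pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
pipe.enable_attention_slicing()        # memory savings, as in the tests
pipe.enable_sequential_cpu_offload()
image = pipe("anime turtle", prior_num_inference_steps=25, num_inference_steps=20).images[0]
image.save("turtle.png")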
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class __a (UpperCamelCase_):
'''simple docstring'''
    def __init__(self, data):
        self.data = data

    def __iter__(self):
        for element in self.data:
            yield element


def create_accelerator(even_batches=True):
    accelerator = Accelerator(even_batches=even_batches)
    assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
    return accelerator


def create_dataloader(accelerator: Accelerator, dataset_size: int, batch_size: int, iterable: bool = False):
    if iterable:
        dataset = DummyIterableDataset(torch.as_tensor(range(dataset_size)))
    else:
        dataset = TensorDataset(torch.as_tensor(range(dataset_size)))
    dl = DataLoader(dataset, batch_size=batch_size)
    dl = accelerator.prepare(dl)
    return dl


def verify_dataloader_batch_sizes(
    accelerator,
    dataset_size,
    batch_size,
    process_0_expected_batch_sizes,
    process_1_expected_batch_sizes,
):
    dl = create_dataloader(accelerator=accelerator, dataset_size=dataset_size, batch_size=batch_size)
    batch_sizes = [len(batch[0]) for batch in dl]
if accelerator.process_index == 0:
assert batch_sizes == process_0_expected_batch_sizes
elif accelerator.process_index == 1:
assert batch_sizes == process_1_expected_batch_sizes
def _lowercase ( ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : Tuple = create_accelerator()
# without padding, we would expect a different number of batches
verify_dataloader_batch_sizes(
__lowerCAmelCase , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1, 1] , )
# without padding, we would expect the same number of batches, but different sizes
verify_dataloader_batch_sizes(
__lowerCAmelCase , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 2] , )
def _lowercase ( ) -> Dict:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = create_accelerator(even_batches=__lowerCAmelCase )
verify_dataloader_batch_sizes(
__lowerCAmelCase , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1] , )
verify_dataloader_batch_sizes(
__lowerCAmelCase , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 1] , )
def _lowercase ( ) -> str:
SCREAMING_SNAKE_CASE__ : List[str] = create_accelerator(even_batches=__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.nn.Linear(1 , 1 )
SCREAMING_SNAKE_CASE__ : Optional[int] = accelerator.prepare(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = create_dataloader(__lowerCAmelCase , dataset_size=3 , batch_size=1 )
SCREAMING_SNAKE_CASE__ : int = []
with accelerator.join_uneven_inputs([ddp_model] ):
for batch_idx, batch in enumerate(__lowerCAmelCase ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = ddp_model(batch[0].float() )
SCREAMING_SNAKE_CASE__ : List[Any] = output.sum()
loss.backward()
batch_idxs.append(__lowerCAmelCase )
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
assert batch_idxs == [0, 1]
elif accelerator.process_index == 1:
assert batch_idxs == [0]
def _lowercase ( __lowerCAmelCase ) -> Union[str, Any]:
with warnings.catch_warnings(record=__lowerCAmelCase ) as w:
with accelerator.join_uneven_inputs([Mock()] ):
pass
assert issubclass(w[-1].category , __lowerCAmelCase )
assert "only supported for multi-GPU" in str(w[-1].message )
def _lowercase ( ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = True
SCREAMING_SNAKE_CASE__ : Optional[Any] = False
SCREAMING_SNAKE_CASE__ : Any = create_accelerator(even_batches=__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Tuple = torch.nn.Linear(1 , 1 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = accelerator.prepare(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Tuple = create_dataloader(__lowerCAmelCase , dataset_size=3 , batch_size=1 )
SCREAMING_SNAKE_CASE__ : List[Any] = create_dataloader(__lowerCAmelCase , dataset_size=3 , batch_size=1 )
with accelerator.join_uneven_inputs([ddp_model] , even_batches=__lowerCAmelCase ):
SCREAMING_SNAKE_CASE__ : List[Any] = train_dl.batch_sampler.even_batches
SCREAMING_SNAKE_CASE__ : str = valid_dl.batch_sampler.even_batches
assert train_dl_overridden_value == overridden_even_batches
assert valid_dl_overridden_value == overridden_even_batches
assert train_dl.batch_sampler.even_batches == default_even_batches
assert valid_dl.batch_sampler.even_batches == default_even_batches
def _lowercase ( ) -> Tuple:
SCREAMING_SNAKE_CASE__ : List[Any] = True
SCREAMING_SNAKE_CASE__ : List[Any] = False
SCREAMING_SNAKE_CASE__ : int = create_accelerator(even_batches=__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : str = torch.nn.Linear(1 , 1 )
SCREAMING_SNAKE_CASE__ : str = accelerator.prepare(__lowerCAmelCase )
create_dataloader(__lowerCAmelCase , dataset_size=3 , batch_size=1 , iterable=__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = create_dataloader(__lowerCAmelCase , dataset_size=3 , batch_size=1 )
with warnings.catch_warnings():
warnings.filterwarnings("""ignore""" )
try:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=__lowerCAmelCase ):
SCREAMING_SNAKE_CASE__ : Any = batch_dl.batch_sampler.even_batches
except AttributeError:
# ensure attribute error is not raised when processing iterable dl
raise AssertionError
assert batch_dl_overridden_value == overridden_even_batches
assert batch_dl.batch_sampler.even_batches == default_even_batches
def _lowercase ( ) -> List[str]:
SCREAMING_SNAKE_CASE__ : str = create_accelerator()
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.nn.Linear(1 , 1 )
SCREAMING_SNAKE_CASE__ : Optional[int] = accelerator.prepare(__lowerCAmelCase )
create_dataloader(__lowerCAmelCase , dataset_size=3 , batch_size=1 , iterable=__lowerCAmelCase )
with warnings.catch_warnings(record=__lowerCAmelCase ) as w:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=__lowerCAmelCase ):
pass
assert issubclass(w[-1].category , __lowerCAmelCase )
assert "only supported for map-style datasets" in str(w[-1].message )
def _lowercase ( ) -> Dict:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = create_accelerator()
accelerator.print("""Test that even_batches variable ensures uniform batches across processes""" )
test_default_ensures_even_batch_sizes()
accelerator.print("""Run tests with even_batches disabled""" )
test_can_disable_even_batches()
accelerator.print("""Test joining uneven inputs""" )
test_can_join_uneven_inputs()
accelerator.print("""Test overriding even_batches when joining uneven inputs""" )
test_join_can_override_even_batches()
accelerator.print("""Test overriding even_batches for mixed dataloader types""" )
test_join_can_override_for_mixed_type_dataloaders()
accelerator.print("""Test overriding even_batches raises a warning for iterable dataloaders""" )
test_join_raises_warning_for_iterable_when_overriding_even_batches()
accelerator.print("""Test join with non DDP distributed raises warning""" )
SCREAMING_SNAKE_CASE__ : Dict = accelerator.state.distributed_type
SCREAMING_SNAKE_CASE__ : Optional[int] = DistributedType.FSDP
test_join_raises_warning_for_non_ddp_distributed(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : str = original_state
if __name__ == "__main__":
main()
| 680 | 0 |
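# --- Hedged sketch of the arithmetic the tests above verify. With even_batches=True,
# Accelerate pads by cycling samples until every rank sees the same full batches; with
# even_batches=False, trailing ranks get short epochs. Batches are dealt round-robin across
# processes. This is a simplification of accelerate's BatchSamplerShard.
import math

def shard_batch_sizes(dataset_size, batch_size, num_procs, even_batches):
    full, rem = divmod(dataset_size, batch_size)
    sizes = [batch_size] * full + ([rem] if rem else [])
    if even_batches:
        # pad (by reusing samples) until every rank gets the same number of full batches
        n_batches = math.ceil(len(sizes) / num_procs) * num_procs
        sizes = [batch_size] * n_batches
    return [sizes[p::num_procs] for p in range(num_procs)]

assert shard_batch_sizes(3, 1, 2, even_batches=True) == [[1, 1], [1, 1]]
assert shard_batch_sizes(3, 1, 2, even_batches=False) == [[1, 1], [1]]
assert shard_batch_sizes(7, 2, 2, even_batches=True) == [[2, 2], [2, 2]]
assert shard_batch_sizes(7, 2, 2, even_batches=False) == [[2, 2], [2, 1]]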
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Any, Dict, Optional, Union


@dataclass
class DownloadConfig:
    # Field names reconstructed to match datasets.DownloadConfig; the defaults are from the source.
    cache_dir: Optional[Union[str, Path]] = None
    force_download: bool = False
    resume_download: bool = False
    local_files_only: bool = False
    proxies: Optional[Dict] = None
    user_agent: Optional[str] = None
    extract_compressed_file: bool = False
    force_extract: bool = False
    delete_extracted: bool = False
    use_etag: bool = True
    num_proc: Optional[int] = None
    max_retries: int = 1
    use_auth_token: Optional[Union[str, bool]] = None
    ignore_url_params: bool = False
    download_desc: Optional[str] = None
    storage_options: Optional[Dict[str, Any]] = None

    def copy(self) -> "DownloadConfig":
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
| 539 |
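# --- Usage sketch for DownloadConfig above (field names reconstructed; see the note in the
# class). copy() deep-copies every field, so mutating the clone leaves the original intact.
base = DownloadConfig(cache_dir="/tmp/hf_cache", max_retries=3)
clone = base.copy()
clone.force_download = True
assert base.force_download is False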
"""simple docstring"""
def _lowercase ( __lowerCAmelCase = 200_0000 ) -> int:
SCREAMING_SNAKE_CASE__ : int = [0 for i in range(n + 1 )]
SCREAMING_SNAKE_CASE__ : str = 1
SCREAMING_SNAKE_CASE__ : str = 1
for i in range(2 , int(n**0.5 ) + 1 ):
if primality_list[i] == 0:
for j in range(i * i , n + 1 , __lowerCAmelCase ):
SCREAMING_SNAKE_CASE__ : Any = 1
SCREAMING_SNAKE_CASE__ : Optional[Any] = 0
for i in range(__lowerCAmelCase ):
if primality_list[i] == 0:
sum_of_primes += i
return sum_of_primes
if __name__ == "__main__":
print(f'{solution() = }')
| 680 | 0 |
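# --- Quick checks for solution() above: primes strictly below n are summed.
assert solution(10) == 17     # 2 + 3 + 5 + 7
assert solution(100) == 1060  # sum of the 25 primes below 100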
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def prepare_mam_aaa_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = input_ids.ne(config.pad_token_id)
    if decoder_attention_mask is None:
        decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
    if head_mask is None:
        head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device)
    if decoder_head_mask is None:
        decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    if cross_attn_head_mask is None:
        cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
class __a :
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Optional[int]=13 , SCREAMING_SNAKE_CASE : Any=7 , SCREAMING_SNAKE_CASE : Union[str, Any]=True , SCREAMING_SNAKE_CASE : List[str]=False , SCREAMING_SNAKE_CASE : Optional[Any]=99 , SCREAMING_SNAKE_CASE : Tuple=16 , SCREAMING_SNAKE_CASE : Optional[int]=2 , SCREAMING_SNAKE_CASE : str=4 , SCREAMING_SNAKE_CASE : Optional[Any]=4 , SCREAMING_SNAKE_CASE : List[Any]="relu" , SCREAMING_SNAKE_CASE : Optional[Any]=0.1 , SCREAMING_SNAKE_CASE : int=0.1 , SCREAMING_SNAKE_CASE : Dict=0.0 , SCREAMING_SNAKE_CASE : Union[str, Any]=0.0 , SCREAMING_SNAKE_CASE : Dict=20 , SCREAMING_SNAKE_CASE : Optional[Any]=2 , SCREAMING_SNAKE_CASE : Union[str, Any]=1 , SCREAMING_SNAKE_CASE : Tuple=0 , ):
'''simple docstring'''
UpperCamelCase__ : Union[str, Any] = parent
UpperCamelCase__ : str = batch_size
UpperCamelCase__ : str = seq_length
UpperCamelCase__ : List[Any] = is_training
UpperCamelCase__ : Optional[Any] = use_labels
UpperCamelCase__ : Union[str, Any] = vocab_size
UpperCamelCase__ : Dict = hidden_size
UpperCamelCase__ : Optional[Any] = num_hidden_layers
UpperCamelCase__ : Union[str, Any] = num_attention_heads
UpperCamelCase__ : Any = intermediate_size
UpperCamelCase__ : Tuple = hidden_act
UpperCamelCase__ : int = hidden_dropout_prob
UpperCamelCase__ : List[str] = attention_probs_dropout_prob
UpperCamelCase__ : Union[str, Any] = encoder_layerdrop
UpperCamelCase__ : List[Any] = decoder_layerdrop
UpperCamelCase__ : List[str] = max_position_embeddings
UpperCamelCase__ : Optional[int] = eos_token_id
UpperCamelCase__ : str = pad_token_id
UpperCamelCase__ : List[str] = bos_token_id
def __lowercase ( self : Tuple ):
'''simple docstring'''
UpperCamelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase__ : str = self.eos_token_id # Eos Token
UpperCamelCase__ : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for M2M100 the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_lenth and which in turn results in
# position_ids being off by num_pad_tokens in past input
UpperCamelCase__ : Dict = input_ids.clamp(self.pad_token_id + 1 )
UpperCamelCase__ : List[Any] = decoder_input_ids.clamp(self.pad_token_id + 1 )
UpperCamelCase__ : List[Any] = self.get_config()
UpperCamelCase__ : Dict = prepare_mam_aaa_inputs_dict(_a , _a , _a )
return config, inputs_dict
def __lowercase ( self : List[str] ):
'''simple docstring'''
return MaMaaaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , )
def __lowercase ( self : List[str] ):
'''simple docstring'''
UpperCamelCase__ : int = self.prepare_config_and_inputs()
return config, inputs_dict
def __lowercase ( self : Any , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
UpperCamelCase__ : Optional[Any] = MaMaaaModel(config=_a ).get_decoder().to(_a ).eval()
UpperCamelCase__ : str = inputs_dict["""input_ids"""]
UpperCamelCase__ : Optional[int] = inputs_dict["""attention_mask"""]
UpperCamelCase__ : Union[str, Any] = inputs_dict["""head_mask"""]
# first forward pass
UpperCamelCase__ : Union[str, Any] = model(_a , attention_mask=_a , head_mask=_a , use_cache=_a )
UpperCamelCase__ : Optional[Any] = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
UpperCamelCase__ : str = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCamelCase__ : List[Any] = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
UpperCamelCase__ : Optional[int] = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCamelCase__ : List[Any] = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
UpperCamelCase__ : Optional[Any] = model(_a , attention_mask=_a )["""last_hidden_state"""]
UpperCamelCase__ : List[Any] = model(_a , attention_mask=_a , past_key_values=_a )[
"""last_hidden_state"""
]
# select random slice
UpperCamelCase__ : Any = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCamelCase__ : Union[str, Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCamelCase__ : List[Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_a , _a , atol=1e-2 ) )
def __lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
UpperCamelCase__ : Union[str, Any] = MaMaaaModel(config=_a ).to(_a ).eval()
UpperCamelCase__ : List[str] = model(**_a )
UpperCamelCase__ : List[Any] = outputs.encoder_last_hidden_state
UpperCamelCase__ : List[Any] = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCamelCase__ : Union[str, Any] = model.get_encoder()
encoder.save_pretrained(_a )
UpperCamelCase__ : int = MaMaaaEncoder.from_pretrained(_a ).to(_a )
UpperCamelCase__ : Optional[int] = encoder(inputs_dict["input_ids"] , attention_mask=inputs_dict["attention_mask"] )[
0
]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCamelCase__ : Optional[Any] = model.get_decoder()
decoder.save_pretrained(_a )
UpperCamelCase__ : Any = MaMaaaDecoder.from_pretrained(_a ).to(_a )
UpperCamelCase__ : List[str] = decoder(
input_ids=inputs_dict["decoder_input_ids"] , attention_mask=inputs_dict["decoder_attention_mask"] , encoder_hidden_states=_a , encoder_attention_mask=inputs_dict["attention_mask"] , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class MaMaaaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaMaaaModel,
            MaMaaaForConditionalGeneration,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": MaMaaaForConditionalGeneration,
            "feature-extraction": MaMaaaModel,
            "summarization": MaMaaaForConditionalGeneration,
            "text2text-generation": MaMaaaForConditionalGeneration,
            "translation": MaMaaaForConditionalGeneration,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = True
    test_pruning = False
    test_missing_keys = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TranslationPipelineTests":
            # Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
            # `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
            return True

        return False

    def setUp(self):
        self.model_tester = MaMaaaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaMaaaConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_save_load_strict(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)

    def test_inputs_embeds(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))

            if not self.is_encoder_decoder:
                input_ids = inputs["input_ids"]
                del inputs["input_ids"]
            else:
                input_ids = inputs["input_ids"]
                decoder_input_ids = inputs.get("decoder_input_ids", input_ids)
                del inputs["input_ids"]
                inputs.pop("decoder_input_ids", None)

            wte = model.get_input_embeddings()
            if not self.is_encoder_decoder:
                inputs["inputs_embeds"] = wte(input_ids)
            else:
                inputs["inputs_embeds"] = wte(input_ids)
                inputs["decoder_inputs_embeds"] = wte(decoder_input_ids)

            with torch.no_grad():
                model(**inputs)[0]

    def test_generate_fp16(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs()
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        model = MaMaaaForConditionalGeneration(config).eval().to(torch_device)
        if torch_device == "cuda":
            model.half()
        model.generate(input_ids, attention_mask=attention_mask)
        model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3)
def _long_tensor(tok_lst):
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)


TOLERANCE = 1e-4
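# Note (clarifying comment, not from the original file): `_long_tensor` builds
# token-id tensors as torch.long because the model's embedding lookup expects
# 64-bit integer indices; TOLERANCE is shared by the integration checks below.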
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class MaMaaaModelIntegrationTests(unittest.TestCase):
    @cached_property
    def default_tokenizer(self):
        return MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M")

    def test_inference_no_head(self):
        model = MaMaaaModel.from_pretrained("facebook/m2m100_418M").to(torch_device)
        input_ids = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]])
        decoder_input_ids = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]])
        inputs_dict = prepare_mam_aaa_inputs_dict(model.config, input_ids, decoder_input_ids)
        with torch.no_grad():
            output = model(**inputs_dict)[0]
        expected_shape = torch.Size((1, 11, 1024))
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = torch.tensor(
            [[-0.7780, -0.1676, 0.1038], [-6.7556, -1.3992, 0.0567], [-7.5383, -0.5920, -0.2779]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_head(self):
        model = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M").to(torch_device)

        # change to intended input
        input_ids = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]])
        decoder_input_ids = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]])
        inputs_dict = prepare_mam_aaa_inputs_dict(model.config, input_ids, decoder_input_ids)
        with torch.no_grad():
            output = model(**inputs_dict)[0]
        expected_shape = torch.Size((1, 11, model.config.vocab_size))
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = torch.tensor(
            [[-1.0448, -1.0411, 3.7992], [-3.2191, -3.2386, -1.3451], [-3.6210, -3.5993, 0.4925]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))

    def test_seq_to_seq_generation(self):
        model = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M").to(torch_device)
        tokenizer = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M", src_lang="fr", tgt_lang="en")

        src_fr = [
            "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
            "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
            "Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent"
            " Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de"
            " l'ampleur de la surveillance américaine sur l'ensemble des communications en France.",
        ]

        # The below article tests that we don't add any hypotheses outside of the top n_beams
        dct = tokenizer(src_fr, padding=True, return_tensors="pt")

        hypotheses_batch = model.generate(
            input_ids=dct["input_ids"].to(torch_device),
            attention_mask=dct["attention_mask"].to(torch_device),
            num_beams=5,
            forced_bos_token_id=tokenizer.get_lang_id("en"),
        )

        expected_en = [
            "The NSA case highlights the total absence of intelligence debate",
            "I think there are two levels of response from the French government.",
            "When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S."
            " Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all"
            " communications in France.",
        ]

        generated = tokenizer.batch_decode(
            hypotheses_batch.tolist(), clean_up_tokenization_spaces=True, skip_special_tokens=True
        )
assert generated == expected_en | 228 |
"""simple docstring"""
import numpy as np
import qiskit
def bb84(key_len: int = 8, seed: int | None = None) -> str:
    """Simulates the BB84 quantum key distribution protocol and returns the
    generated key as a bitstring of length ``key_len``."""
    # Set up the random number generator.
    rng = np.random.default_rng(seed=seed)

    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)

    # Quantum Circuit to simulate BB84
    bb84_circ = qiskit.QuantumCircuit(num_qubits, name="BB84")

    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_basis):
        if alice_state[index] == 1:
            bb84_circ.x(index)
        if alice_basis[index] == 1:
            bb84_circ.h(index)
    bb84_circ.barrier()

    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bb84_circ.h(index)

    bb84_circ.barrier()
    bb84_circ.measure_all()

    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("aer_simulator")
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bb84_circ, sim, shots=1, seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bb84_circ).most_frequent()

    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = "".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(alice_basis, bob_basis, result)
            if alice_basis_bit == bob_basis_bit
        ]
    )

    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, "0")

    return key


if __name__ == "__main__":
    print(f"The generated key is : {bb84(8, seed=0)}")

    from doctest import testmod

    testmod()
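# A classical sketch of the sifting step above (illustrative, not part of the
# original module): a measured bit is kept only when Alice's and Bob's randomly
# chosen bases agree, which is why the circuit oversamples qubits (6x key_len)
# before truncating or padding the key.
#
#     rng = np.random.default_rng(0)
#     alice_basis = rng.integers(2, size=16)
#     bob_basis = rng.integers(2, size=16)
#     bits = rng.integers(2, size=16)
#     sifted = "".join(str(b) for x, y, b in zip(alice_basis, bob_basis, bits) if x == y)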
| 680 | 0 |
def min_path_sum(grid: list) -> int:
    """Find the path from the top left to the bottom right of a grid of numbers
    that minimizes the sum of the numbers along the path, and return that sum."""
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")

    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]

    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]

    return grid[-1][-1]


def fill_row(current_row: list, row_above: list) -> list:
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])

    return current_row


if __name__ == "__main__":
    import doctest

    doctest.testmod()
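# Example usage (a quick sanity check; this grid is an assumption added for
# illustration, not from the original file):
#
#     >>> min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]])
#     7
#
# The minimizing path is 1 -> 3 -> 1 -> 1 -> 1, summing to 7.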
| 70 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=9,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_inpaint(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class StableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3

    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            torch_dtype=torch.float16,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pndm = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            safety_checker=None,
            scheduler=pndm,
            torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
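# For reference (a rough sketch, not part of the test suite): the 2.65 GB bound
# above relies on two memory-saving switches used together,
#
#     pipe.enable_sequential_cpu_offload()  # keeps at most one submodule on the GPU at a time
#     pipe.enable_attention_slicing(1)      # computes attention in slices instead of all at once
#
# which trades extra latency for a much smaller peak allocation.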
| 680 | 0 |
"""simple docstring"""
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        image_size=32,
        patch_size=16,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        backbone_out_indices=[0, 1, 2, 3],
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        backbone_featmap_shape=[1, 384, 24, 24],
        is_hybrid=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [96, 192, 384, 768],
            "num_groups": 2,
        }

        return DPTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            backbone_out_indices=self.backbone_out_indices,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            is_hybrid=self.is_hybrid,
            backbone_config=backbone_config,
            backbone_featmap_shape=self.backbone_featmap_shape,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = DPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_depth_estimation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "depth-estimation": DPTForDepthEstimation,
            "feature-extraction": DPTModel,
            "image-segmentation": DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DPT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_depth_estimation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    def test_training(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.use_cache = False
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break

            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            model = DPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_raise_readout_type(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        config.readout_type = "add"
        with self.assertRaises(ValueError):
            _ = DPTForDepthEstimation(config)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
@slow
class DPTModelIntegrationTest(unittest.TestCase):
    def test_inference_depth_estimation(self):
        image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
        model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").to(torch_device)

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
            predicted_depth = outputs.predicted_depth

        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384))
        self.assertEqual(predicted_depth.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100, expected_slice, atol=1e-4))
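# A short usage sketch (illustrative; the interpolation step is an assumption
# based on the usual DPT post-processing, not part of this test file) for
# turning `predicted_depth` into a full-resolution depth map:
#
#     import torch
#     depth = torch.nn.functional.interpolate(
#         outputs.predicted_depth.unsqueeze(1),  # add a channel dimension
#         size=image.size[::-1],                 # PIL size is (width, height)
#         mode="bicubic",
#         align_corners=False,
#     )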
| 110 |
"""simple docstring"""
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
a :str = logging.getLogger(__name__)
def _lowercase ( ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Dict = argparse.ArgumentParser(
description="""Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).""" )
parser.add_argument("""--file_path""" , type=__lowerCAmelCase , default="""data/dump.txt""" , help="""The path to the data.""" )
parser.add_argument("""--tokenizer_type""" , type=__lowerCAmelCase , default="""bert""" , choices=["""bert""", """roberta""", """gpt2"""] )
parser.add_argument("""--tokenizer_name""" , type=__lowerCAmelCase , default="""bert-base-uncased""" , help="""The tokenizer to use.""" )
parser.add_argument("""--dump_file""" , type=__lowerCAmelCase , default="""data/dump""" , help="""The dump file prefix.""" )
SCREAMING_SNAKE_CASE__ : str = parser.parse_args()
logger.info(F'''Loading Tokenizer ({args.tokenizer_name})''' )
if args.tokenizer_type == "bert":
SCREAMING_SNAKE_CASE__ : List[str] = BertTokenizer.from_pretrained(args.tokenizer_name )
SCREAMING_SNAKE_CASE__ : str = tokenizer.special_tokens_map["""cls_token"""] # `[CLS]`
SCREAMING_SNAKE_CASE__ : str = tokenizer.special_tokens_map["""sep_token"""] # `[SEP]`
elif args.tokenizer_type == "roberta":
SCREAMING_SNAKE_CASE__ : List[Any] = RobertaTokenizer.from_pretrained(args.tokenizer_name )
SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer.special_tokens_map["""cls_token"""] # `<s>`
SCREAMING_SNAKE_CASE__ : Dict = tokenizer.special_tokens_map["""sep_token"""] # `</s>`
elif args.tokenizer_type == "gpt2":
SCREAMING_SNAKE_CASE__ : List[Any] = GPTaTokenizer.from_pretrained(args.tokenizer_name )
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.special_tokens_map["""bos_token"""] # `<|endoftext|>`
SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer.special_tokens_map["""eos_token"""] # `<|endoftext|>`
logger.info(F'''Loading text from {args.file_path}''' )
with open(args.file_path , """r""" , encoding="""utf8""" ) as fp:
SCREAMING_SNAKE_CASE__ : int = fp.readlines()
logger.info("""Start encoding""" )
logger.info(F'''{len(__lowerCAmelCase )} examples to process.''' )
SCREAMING_SNAKE_CASE__ : str = []
SCREAMING_SNAKE_CASE__ : Any = 0
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 1_0000
SCREAMING_SNAKE_CASE__ : Dict = time.time()
for text in data:
SCREAMING_SNAKE_CASE__ : Dict = F'''{bos} {text.strip()} {sep}'''
SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
rslt.append(__lowerCAmelCase )
iter += 1
if iter % interval == 0:
SCREAMING_SNAKE_CASE__ : str = time.time()
logger.info(F'''{iter} examples processed. - {(end-start):.2f}s/{interval}expl''' )
SCREAMING_SNAKE_CASE__ : Tuple = time.time()
logger.info("""Finished binarization""" )
logger.info(F'''{len(__lowerCAmelCase )} examples processed.''' )
SCREAMING_SNAKE_CASE__ : Optional[int] = F'''{args.dump_file}.{args.tokenizer_name}.pickle'''
SCREAMING_SNAKE_CASE__ : Dict = tokenizer.vocab_size
if vocab_size < (1 << 16):
SCREAMING_SNAKE_CASE__ : Tuple = [np.uintaa(__lowerCAmelCase ) for d in rslt]
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = [np.intaa(__lowerCAmelCase ) for d in rslt]
random.shuffle(rslt_ )
logger.info(F'''Dump to {dp_file}''' )
with open(__lowerCAmelCase , """wb""" ) as handle:
pickle.dump(rslt_ , __lowerCAmelCase , protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
main()
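# A minimal sketch (an assumption for illustration, not part of the original
# script) of how the dump produced above can be read back: each entry is one
# tokenized sequence stored as a compact numpy integer array.
#
#     import pickle
#     with open("data/dump.bert-base-uncased.pickle", "rb") as fp:
#         sequences = pickle.load(fp)
#     print(len(sequences), sequences[0][:10])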
| 680 | 0 |
"""simple docstring"""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
[
('''align''', '''EfficientNetImageProcessor'''),
('''beit''', '''BeitImageProcessor'''),
('''bit''', '''BitImageProcessor'''),
('''blip''', '''BlipImageProcessor'''),
('''blip-2''', '''BlipImageProcessor'''),
('''bridgetower''', '''BridgeTowerImageProcessor'''),
('''chinese_clip''', '''ChineseCLIPImageProcessor'''),
('''clip''', '''CLIPImageProcessor'''),
('''clipseg''', '''ViTImageProcessor'''),
('''conditional_detr''', '''ConditionalDetrImageProcessor'''),
('''convnext''', '''ConvNextImageProcessor'''),
('''convnextv2''', '''ConvNextImageProcessor'''),
('''cvt''', '''ConvNextImageProcessor'''),
('''data2vec-vision''', '''BeitImageProcessor'''),
('''deformable_detr''', '''DeformableDetrImageProcessor'''),
('''deit''', '''DeiTImageProcessor'''),
('''deta''', '''DetaImageProcessor'''),
('''detr''', '''DetrImageProcessor'''),
('''dinat''', '''ViTImageProcessor'''),
('''donut-swin''', '''DonutImageProcessor'''),
('''dpt''', '''DPTImageProcessor'''),
('''efficientformer''', '''EfficientFormerImageProcessor'''),
('''efficientnet''', '''EfficientNetImageProcessor'''),
('''flava''', '''FlavaImageProcessor'''),
('''focalnet''', '''BitImageProcessor'''),
('''git''', '''CLIPImageProcessor'''),
('''glpn''', '''GLPNImageProcessor'''),
('''groupvit''', '''CLIPImageProcessor'''),
('''imagegpt''', '''ImageGPTImageProcessor'''),
('''instructblip''', '''BlipImageProcessor'''),
('''layoutlmv2''', '''LayoutLMv2ImageProcessor'''),
('''layoutlmv3''', '''LayoutLMv3ImageProcessor'''),
('''levit''', '''LevitImageProcessor'''),
('''mask2former''', '''Mask2FormerImageProcessor'''),
('''maskformer''', '''MaskFormerImageProcessor'''),
('''mgp-str''', '''ViTImageProcessor'''),
('''mobilenet_v1''', '''MobileNetV1ImageProcessor'''),
('''mobilenet_v2''', '''MobileNetV2ImageProcessor'''),
('''mobilevit''', '''MobileViTImageProcessor'''),
('''mobilevitv2''', '''MobileViTImageProcessor'''),
('''nat''', '''ViTImageProcessor'''),
('''oneformer''', '''OneFormerImageProcessor'''),
('''owlvit''', '''OwlViTImageProcessor'''),
('''perceiver''', '''PerceiverImageProcessor'''),
('''pix2struct''', '''Pix2StructImageProcessor'''),
('''poolformer''', '''PoolFormerImageProcessor'''),
('''regnet''', '''ConvNextImageProcessor'''),
('''resnet''', '''ConvNextImageProcessor'''),
('''sam''', '''SamImageProcessor'''),
('''segformer''', '''SegformerImageProcessor'''),
('''swiftformer''', '''ViTImageProcessor'''),
('''swin''', '''ViTImageProcessor'''),
('''swin2sr''', '''Swin2SRImageProcessor'''),
('''swinv2''', '''ViTImageProcessor'''),
('''table-transformer''', '''DetrImageProcessor'''),
('''timesformer''', '''VideoMAEImageProcessor'''),
('''tvlt''', '''TvltImageProcessor'''),
('''upernet''', '''SegformerImageProcessor'''),
('''van''', '''ConvNextImageProcessor'''),
('''videomae''', '''VideoMAEImageProcessor'''),
('''vilt''', '''ViltImageProcessor'''),
('''vit''', '''ViTImageProcessor'''),
('''vit_hybrid''', '''ViTHybridImageProcessor'''),
('''vit_mae''', '''ViTImageProcessor'''),
('''vit_msn''', '''ViTImageProcessor'''),
('''xclip''', '''CLIPImageProcessor'''),
('''yolos''', '''YolosImageProcessor'''),
]
)
IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name(class_name: str):
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_image_processor_config(
    pretrained_model_name_or_path,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
    **kwargs,
):
    """Loads the image processor configuration of a pretrained model as a dict, or {} if none is found."""
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        IMAGE_PROCESSOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the image processor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
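# Usage sketch (illustrative; the checkpoint name is just an example): the
# helper above returns the raw JSON dict, or {} when no image processor config
# file is found on the Hub or on disk.
#
#     config_dict = get_image_processor_config("google/vit-base-patch16-224")
#     print(config_dict.get("image_processor_type"))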
class AutoImageProcessor:
    """
    This is a generic image processor class that will be instantiated as one of the image processor classes of the
    library when created with the [`AutoImageProcessor.from_pretrained`] class method. This class cannot be
    instantiated directly using `__init__()` (throws an error).
    """

    def __init__(self):
        raise EnvironmentError(
            "AutoImageProcessor is designed to be instantiated "
            "using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path, **kwargs)
        image_processor_class = config_dict.get("image_processor_type", None)
        image_processor_auto_map = None
        if "AutoImageProcessor" in config_dict.get("auto_map", {}):
            image_processor_auto_map = config_dict["auto_map"]["AutoImageProcessor"]

        # If we still don't have the image processor class, check if we're loading from a previous feature extractor config
        # and if so, infer the image processor class from there.
        if image_processor_class is None and image_processor_auto_map is None:
            feature_extractor_class = config_dict.pop("feature_extractor_type", None)
            if feature_extractor_class is not None:
                logger.warning(
                    "Could not find image processor class in the image processor config or the model config. Loading"
                    " based on pattern matching with the model's feature extractor configuration."
                )
                image_processor_class = feature_extractor_class.replace("FeatureExtractor", "ImageProcessor")
            if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
                feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]
                image_processor_auto_map = feature_extractor_auto_map.replace("FeatureExtractor", "ImageProcessor")
                logger.warning(
                    "Could not find image processor auto map in the image processor config or the model config."
                    " Loading based on pattern matching with the model's feature extractor configuration."
                )

        # If we don't find the image processor class in the image processor config, let's try the model config.
        if image_processor_class is None and image_processor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.image_processor_type``
            image_processor_class = getattr(config, "image_processor_type", None)
            if hasattr(config, "auto_map") and "AutoImageProcessor" in config.auto_map:
                image_processor_auto_map = config.auto_map["AutoImageProcessor"]

        if image_processor_class is not None:
            image_processor_class = image_processor_class_from_name(image_processor_class)

        has_remote_code = image_processor_auto_map is not None
        has_local_code = image_processor_class is not None or type(config) in IMAGE_PROCESSOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            image_processor_class = get_class_from_dynamic_module(
                image_processor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                image_processor_class.register_for_auto_class()
            return image_processor_class.from_dict(config_dict, **kwargs)
        elif image_processor_class is not None:
            return image_processor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the IMAGE_PROCESSOR_MAPPING.
        elif type(config) in IMAGE_PROCESSOR_MAPPING:
            image_processor_class = IMAGE_PROCESSOR_MAPPING[type(config)]
            return image_processor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized image processor in {pretrained_model_name_or_path}. Should have a "
            f"`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, image_processor_class):
        """
        Register a new image processor for this class.
        """
        IMAGE_PROCESSOR_MAPPING.register(config_class, image_processor_class)
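# Usage sketch (hedged; `CustomConfig` and `CustomImageProcessor` are
# hypothetical names for illustration): `register` plugs a user-defined
# processor into the auto-class lookup used by `from_pretrained`.
#
#     AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
#     processor = AutoImageProcessor.from_pretrained("my-org/my-model")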
| 645 |
"""simple docstring"""
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
a :List[Any] = ""
a :Union[str, Any] = ""
a :List[str] = ""
a :str = 1 # (0 is vertical, 1 is horizontal)
def _lowercase ( ) -> None:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = get_dataset(__lowerCAmelCase , __lowerCAmelCase )
print("""Processing...""" )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = update_image_and_anno(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
for index, image in enumerate(__lowerCAmelCase ):
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
SCREAMING_SNAKE_CASE__ : List[Any] = random_chars(32 )
SCREAMING_SNAKE_CASE__ : List[str] = paths[index].split(os.sep )[-1].rsplit(""".""" , 1 )[0]
SCREAMING_SNAKE_CASE__ : List[str] = F'''{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}'''
cva.imwrite(F'''/{file_root}.jpg''' , __lowerCAmelCase , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(F'''Success {index+1}/{len(__lowerCAmelCase )} with {file_name}''' )
SCREAMING_SNAKE_CASE__ : int = []
for anno in new_annos[index]:
SCREAMING_SNAKE_CASE__ : Tuple = F'''{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}'''
annos_list.append(__lowerCAmelCase )
with open(F'''/{file_root}.txt''' , """w""" ) as outfile:
outfile.write("""\n""".join(line for line in annos_list ) )
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase ) -> tuple[list, list]:
SCREAMING_SNAKE_CASE__ : Any = []
SCREAMING_SNAKE_CASE__ : Union[str, Any] = []
for label_file in glob.glob(os.path.join(__lowerCAmelCase , """*.txt""" ) ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = label_file.split(os.sep )[-1].rsplit(""".""" , 1 )[0]
with open(__lowerCAmelCase ) as in_file:
SCREAMING_SNAKE_CASE__ : Dict = in_file.readlines()
SCREAMING_SNAKE_CASE__ : int = os.path.join(__lowerCAmelCase , F'''{label_name}.jpg''' )
SCREAMING_SNAKE_CASE__ : int = []
for obj_list in obj_lists:
SCREAMING_SNAKE_CASE__ : Optional[int] = obj_list.rstrip("""\n""" ).split(""" """ )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
img_paths.append(__lowerCAmelCase )
labels.append(__lowerCAmelCase )
return img_paths, labels
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 1 ) -> tuple[list, list, list]:
SCREAMING_SNAKE_CASE__ : Dict = []
SCREAMING_SNAKE_CASE__ : Union[str, Any] = []
SCREAMING_SNAKE_CASE__ : Optional[int] = []
for idx in range(len(__lowerCAmelCase ) ):
SCREAMING_SNAKE_CASE__ : List[str] = []
SCREAMING_SNAKE_CASE__ : str = img_list[idx]
path_list.append(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[int] = anno_list[idx]
SCREAMING_SNAKE_CASE__ : Tuple = cva.imread(__lowerCAmelCase )
if flip_type == 1:
SCREAMING_SNAKE_CASE__ : int = cva.flip(__lowerCAmelCase , __lowerCAmelCase )
for bbox in img_annos:
SCREAMING_SNAKE_CASE__ : Optional[int] = 1 - bbox[1]
new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
elif flip_type == 0:
SCREAMING_SNAKE_CASE__ : Any = cva.flip(__lowerCAmelCase , __lowerCAmelCase )
for bbox in img_annos:
SCREAMING_SNAKE_CASE__ : List[Any] = 1 - bbox[2]
new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
new_annos_lists.append(__lowerCAmelCase )
new_imgs_list.append(__lowerCAmelCase )
return new_imgs_list, new_annos_lists, path_list
def _lowercase ( __lowerCAmelCase = 32 ) -> str:
assert number_char > 1, "The number of character should greater than 1"
SCREAMING_SNAKE_CASE__ : List[str] = ascii_lowercase + digits
return "".join(random.choice(__lowerCAmelCase ) for _ in range(__lowerCAmelCase ) )
if __name__ == "__main__":
main()
print("DONE ✅")
| 680 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
    "processing_speech_to_text": ["Speech2TextProcessor"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_speech_to_text"] = ["Speech2TextFeatureExtractor"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
        "TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSpeech2TextForConditionalGeneration",
        "TFSpeech2TextModel",
        "TFSpeech2TextPreTrainedModel",
    ]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_to_text"] = [
        "SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Speech2TextForConditionalGeneration",
        "Speech2TextModel",
        "Speech2TextPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speech_to_text import Speech2TextTokenizer

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 106 |
"""simple docstring"""
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class __a (enum.Enum):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :Optional[Any] = 0
_SCREAMING_SNAKE_CASE :List[Any] = 1
_SCREAMING_SNAKE_CASE :Dict = 2
@add_end_docstrings(UpperCamelCase_)
class __a (UpperCamelCase_):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :Optional[Any] = """
In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
begging for his blessing. <eod> </s> <eos>
"""
def __init__( self , *_a , **_a ) -> Tuple:
"""simple docstring"""
super().__init__(*_a , **_a )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == """tf""" else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
SCREAMING_SNAKE_CASE__ : Any = None
if self.model.config.prefix is not None:
SCREAMING_SNAKE_CASE__ : List[str] = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = self._sanitize_parameters(prefix=_a , **self._forward_params )
SCREAMING_SNAKE_CASE__ : Optional[Any] = {**self._preprocess_params, **preprocess_params}
SCREAMING_SNAKE_CASE__ : Optional[Any] = {**self._forward_params, **forward_params}
def _a ( self , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , **_a , ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = {}
if prefix is not None:
SCREAMING_SNAKE_CASE__ : Dict = prefix
if prefix:
SCREAMING_SNAKE_CASE__ : Tuple = self.tokenizer(
_a , padding=_a , add_special_tokens=_a , return_tensors=self.framework )
SCREAMING_SNAKE_CASE__ : Tuple = prefix_inputs["""input_ids"""].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
f'''{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected'''
""" [None, 'hole']""" )
SCREAMING_SNAKE_CASE__ : int = handle_long_generation
preprocess_params.update(_a )
SCREAMING_SNAKE_CASE__ : Optional[int] = generate_kwargs
SCREAMING_SNAKE_CASE__ : int = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError("""`return_text` is mutually exclusive with `return_full_text`""" )
if return_tensors is not None:
raise ValueError("""`return_full_text` is mutually exclusive with `return_tensors`""" )
SCREAMING_SNAKE_CASE__ : List[Any] = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError("""`return_text` is mutually exclusive with `return_tensors`""" )
SCREAMING_SNAKE_CASE__ : Tuple = ReturnType.TENSORS
if return_type is not None:
SCREAMING_SNAKE_CASE__ : int = return_type
if clean_up_tokenization_spaces is not None:
SCREAMING_SNAKE_CASE__ : List[str] = clean_up_tokenization_spaces
if stop_sequence is not None:
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.tokenizer.encode(_a , add_special_tokens=_a )
if len(_a ) > 1:
warnings.warn(
"""Stopping on a multiple token sequence is not yet supported on transformers. The first token of"""
""" the stop sequence will be used as the stop sequence string in the interim.""" )
SCREAMING_SNAKE_CASE__ : List[Any] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def _a ( self , *_a , **_a ) -> Any:
"""simple docstring"""
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({"""add_space_before_punct_symbol""": True} )
return super()._parse_and_tokenize(*_a , **_a )
def __call__( self , _a , **_a ) -> Optional[int]:
"""simple docstring"""
return super().__call__(_a , **_a )
def _a ( self , _a , _a="" , _a=None , **_a ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.tokenizer(
prefix + prompt_text , padding=_a , add_special_tokens=_a , return_tensors=self.framework )
SCREAMING_SNAKE_CASE__ : Tuple = prompt_text
if handle_long_generation == "hole":
SCREAMING_SNAKE_CASE__ : List[Any] = inputs["""input_ids"""].shape[-1]
if "max_new_tokens" in generate_kwargs:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = generate_kwargs["""max_new_tokens"""]
else:
SCREAMING_SNAKE_CASE__ : Tuple = generate_kwargs.get("""max_length""" , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError("""We cannot infer how many new tokens are expected""" )
if cur_len + new_tokens > self.tokenizer.model_max_length:
SCREAMING_SNAKE_CASE__ : str = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
"""We cannot use `hole` to handle this generation the number of desired tokens exceeds the"""
""" models max length""" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = inputs["""input_ids"""][:, -keep_length:]
if "attention_mask" in inputs:
SCREAMING_SNAKE_CASE__ : Optional[int] = inputs["""attention_mask"""][:, -keep_length:]
return inputs
def _a ( self , _a , **_a ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model_inputs["""input_ids"""]
SCREAMING_SNAKE_CASE__ : Optional[int] = model_inputs.get("""attention_mask""" , _a )
# Allow empty prompts
if input_ids.shape[1] == 0:
SCREAMING_SNAKE_CASE__ : List[str] = None
SCREAMING_SNAKE_CASE__ : List[Any] = None
SCREAMING_SNAKE_CASE__ : List[str] = 1
else:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = input_ids.shape[0]
SCREAMING_SNAKE_CASE__ : Tuple = model_inputs.pop("""prompt_text""" )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
SCREAMING_SNAKE_CASE__ : Optional[int] = generate_kwargs.pop("""prefix_length""" , 0 )
if prefix_length > 0:
SCREAMING_SNAKE_CASE__ : List[str] = """max_new_tokens""" in generate_kwargs or (
"""generation_config""" in generate_kwargs
and generate_kwargs["""generation_config"""].max_new_tokens is not None
)
if not has_max_new_tokens:
SCREAMING_SNAKE_CASE__ : int = generate_kwargs.get("""max_length""" ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
SCREAMING_SNAKE_CASE__ : Dict = """min_new_tokens""" in generate_kwargs or (
"""generation_config""" in generate_kwargs
and generate_kwargs["""generation_config"""].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
SCREAMING_SNAKE_CASE__ : Tuple = self.model.generate(input_ids=_a , attention_mask=_a , **_a )
SCREAMING_SNAKE_CASE__ : Optional[int] = generated_sequence.shape[0]
if self.framework == "pt":
SCREAMING_SNAKE_CASE__ : str = generated_sequence.reshape(_a , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tf.reshape(_a , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
    def postprocess(self, model_outputs, return_type=ReturnType.FULL_TEXT, clean_up_tokenization_spaces=True):
        """simple docstring"""
        generated_sequence = model_outputs["generated_sequence"][0]
        input_ids = model_outputs["input_ids"]
        prompt_text = model_outputs["prompt_text"]
        generated_sequence = generated_sequence.numpy().tolist()
        records = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                record = {"generated_token_ids": sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                text = self.tokenizer.decode(
                    sequence, skip_special_tokens=True,
                    clean_up_tokenization_spaces=clean_up_tokenization_spaces)
                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    prompt_length = 0
                else:
                    prompt_length = len(
                        self.tokenizer.decode(
                            input_ids[0], skip_special_tokens=True,
                            clean_up_tokenization_spaces=clean_up_tokenization_spaces))
                if return_type == ReturnType.FULL_TEXT:
                    all_text = prompt_text + text[prompt_length:]
                else:
                    all_text = text[prompt_length:]
                record = {"generated_text": all_text}
            records.append(record)
        return records
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class lowercase :
"""simple docstring"""
def __init__( self : Union[str, Any] , a_ : int , a_ : List[Any]=13 , a_ : Any=10 , a_ : int=3 , a_ : Any=2 , a_ : List[str]=2 , a_ : Union[str, Any]=2 , a_ : str=True , a_ : Union[str, Any]=True , a_ : Union[str, Any]=32 , a_ : List[str]=5 , a_ : Any=4 , a_ : Union[str, Any]=37 , a_ : Any="gelu" , a_ : List[str]=0.1 , a_ : Dict=0.1 , a_ : int=10 , a_ : List[Any]=0.0_2 , a_ : int=0.9 , a_ : Dict=None , ):
"""simple docstring"""
lowerCamelCase__ = parent
lowerCamelCase__ = batch_size
lowerCamelCase__ = image_size
lowerCamelCase__ = num_channels
lowerCamelCase__ = patch_size
lowerCamelCase__ = tubelet_size
lowerCamelCase__ = num_frames
lowerCamelCase__ = is_training
lowerCamelCase__ = use_labels
lowerCamelCase__ = hidden_size
lowerCamelCase__ = num_hidden_layers
lowerCamelCase__ = num_attention_heads
lowerCamelCase__ = intermediate_size
lowerCamelCase__ = hidden_act
lowerCamelCase__ = hidden_dropout_prob
lowerCamelCase__ = attention_probs_dropout_prob
lowerCamelCase__ = type_sequence_label_size
lowerCamelCase__ = initializer_range
lowerCamelCase__ = mask_ratio
lowerCamelCase__ = scope
# in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
lowerCamelCase__ = (image_size // patch_size) ** 2
lowerCamelCase__ = (num_frames // tubelet_size) * self.num_patches_per_frame
# use this variable to define bool_masked_pos
lowerCamelCase__ = int(mask_ratio * self.seq_length )
def _UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
lowerCamelCase__ = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase__ = None
if self.use_labels:
lowerCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase__ = self.get_config()
return config, pixel_values, labels
def _UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
return VideoMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_a , initializer_range=self.initializer_range , )
def _UpperCamelCase ( self : Any , a_ : List[Any] , a_ : Tuple , a_ : Any ):
"""simple docstring"""
lowerCamelCase__ = VideoMAEModel(config=_a )
model.to(_a )
model.eval()
lowerCamelCase__ = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCamelCase ( self : List[str] , a_ : List[str] , a_ : List[Any] , a_ : List[str] ):
"""simple docstring"""
lowerCamelCase__ = VideoMAEForPreTraining(_a )
model.to(_a )
model.eval()
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
lowerCamelCase__ = torch.ones((self.num_masks,) )
lowerCamelCase__ = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
lowerCamelCase__ = mask.expand(self.batch_size , -1 ).bool()
lowerCamelCase__ = model(_a , _a )
# model only returns predictions for masked patches
lowerCamelCase__ = mask.sum().item()
lowerCamelCase__ = 3 * self.tubelet_size * self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) )
def _UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class lowercase ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = (
(VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
)
snake_case_ = (
{"""feature-extraction""": VideoMAEModel, """video-classification""": VideoMAEForVideoClassification}
if is_torch_available()
else {}
)
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
def _UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
lowerCamelCase__ = VideoMAEModelTester(self )
lowerCamelCase__ = ConfigTester(self , config_class=_a , has_text_modality=_a , hidden_size=37 )
def _UpperCamelCase ( self : Optional[int] , a_ : str , a_ : int , a_ : List[str]=False ):
"""simple docstring"""
lowerCamelCase__ = copy.deepcopy(_a )
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
lowerCamelCase__ = torch.ones((self.model_tester.num_masks,) )
lowerCamelCase__ = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
lowerCamelCase__ = mask.expand(self.model_tester.batch_size , -1 ).bool()
lowerCamelCase__ = bool_masked_pos.to(_a )
if return_labels:
if model_class in [
*get_values(_a ),
]:
lowerCamelCase__ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_a )
return inputs_dict
def _UpperCamelCase ( self : List[str] ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""VideoMAE does not use inputs_embeds""" )
def _UpperCamelCase ( self : List[str] ):
"""simple docstring"""
pass
def _UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ = model_class(_a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCamelCase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_a , nn.Linear ) )
def _UpperCamelCase ( self : List[str] ):
"""simple docstring"""
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ = model_class(_a )
lowerCamelCase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__ = [*signature.parameters.keys()]
lowerCamelCase__ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _a )
def _UpperCamelCase ( self : Tuple ):
"""simple docstring"""
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def _UpperCamelCase ( self : Dict ):
"""simple docstring"""
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_a )
@slow
def _UpperCamelCase ( self : List[str] ):
"""simple docstring"""
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ = VideoMAEModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def _UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
if not self.has_attentions:
pass
else:
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ = True
for model_class in self.all_model_classes:
lowerCamelCase__ = self.model_tester.seq_length - self.model_tester.num_masks
lowerCamelCase__ = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
lowerCamelCase__ = True
lowerCamelCase__ = False
lowerCamelCase__ = True
lowerCamelCase__ = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
lowerCamelCase__ = model(**self._prepare_for_class(_a , _a ) )
lowerCamelCase__ = outputs.attentions
self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCamelCase__ = True
lowerCamelCase__ = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
lowerCamelCase__ = model(**self._prepare_for_class(_a , _a ) )
lowerCamelCase__ = outputs.attentions
self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
lowerCamelCase__ = len(_a )
# Check attention is always last and order is fine
lowerCamelCase__ = True
lowerCamelCase__ = True
lowerCamelCase__ = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
lowerCamelCase__ = model(**self._prepare_for_class(_a , _a ) )
self.assertEqual(out_len + 1 , len(_a ) )
lowerCamelCase__ = outputs.attentions
self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def _UpperCamelCase ( self : Any ):
"""simple docstring"""
def check_hidden_states_output(a_ : List[str] , a_ : Tuple , a_ : Dict ):
lowerCamelCase__ = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
lowerCamelCase__ = model(**self._prepare_for_class(_a , _a ) )
lowerCamelCase__ = outputs.hidden_states
lowerCamelCase__ = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(_a ) , _a )
lowerCamelCase__ = self.model_tester.seq_length - self.model_tester.num_masks
lowerCamelCase__ = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ = True
check_hidden_states_output(_a , _a , _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase__ = True
check_hidden_states_output(_a , _a , _a )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _UpperCamelCase ( self : Tuple ):
"""simple docstring"""
pass
def snake_case ():
'''simple docstring'''
lowerCamelCase__ = hf_hub_download(
repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" )
lowerCamelCase__ = np.load(__lowerCAmelCase )
return list(__lowerCAmelCase )
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _UpperCamelCase ( self : List[str] ):
"""simple docstring"""
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def _UpperCamelCase ( self : List[str] ):
"""simple docstring"""
lowerCamelCase__ = VideoMAEForVideoClassification.from_pretrained("""MCG-NJU/videomae-base-finetuned-kinetics""" ).to(
_a )
lowerCamelCase__ = self.default_image_processor
lowerCamelCase__ = prepare_video()
lowerCamelCase__ = image_processor(_a , return_tensors="""pt""" ).to(_a )
# forward pass
with torch.no_grad():
lowerCamelCase__ = model(**_a )
# verify the logits
        lowerCamelCase__ = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape , _a )
        lowerCamelCase__ = torch.tensor([0.3669, -0.0688, -0.2421] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1e-4 ) )
@slow
def _UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
lowerCamelCase__ = VideoMAEForPreTraining.from_pretrained("""MCG-NJU/videomae-base-short""" ).to(_a )
lowerCamelCase__ = self.default_image_processor
lowerCamelCase__ = prepare_video()
lowerCamelCase__ = image_processor(_a , return_tensors="""pt""" ).to(_a )
# add boolean mask, indicating which patches to mask
lowerCamelCase__ = hf_hub_download(repo_id="""hf-internal-testing/bool-masked-pos""" , filename="""bool_masked_pos.pt""" )
lowerCamelCase__ = torch.load(_a )
# forward pass
with torch.no_grad():
lowerCamelCase__ = model(**_a )
# verify the logits
        lowerCamelCase__ = torch.Size([1, 1408, 1536] )
        lowerCamelCase__ = torch.tensor(
            [[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]] , device=_a )
self.assertEqual(outputs.logits.shape , _a )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , _a , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
        lowerCamelCase__ = torch.tensor([0.5142] , device=_a )
self.assertTrue(torch.allclose(outputs.loss , _a , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `False`)
lowerCamelCase__ = VideoMAEForPreTraining.from_pretrained("""MCG-NJU/videomae-base-short""" , norm_pix_loss=_a ).to(
_a )
with torch.no_grad():
lowerCamelCase__ = model(**_a )
        lowerCamelCase__ = torch.tensor([0.6469] , device=_a )
self.assertTrue(torch.allclose(outputs.loss , _a , atol=1e-4 ) )
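# Minimal sketch of the masking scheme used in the tests above (sizes are
# illustrative only): a single boolean mask marks which tube tokens are masked
# and is shared across every example in the batch.
#
#   seq_length, num_masks, batch_size = 1568, 1411, 2
#   mask = torch.cat([torch.ones(num_masks), torch.zeros(seq_length - num_masks)])
#   bool_masked_pos = mask.expand(batch_size, -1).bool()   # shape (batch_size, seq_length)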
"""simple docstring"""
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray


def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[float],
    iterations: int,
) -> list[float]:
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        raise ValueError(f'''Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}''')

    if cols2 != 1:
        raise ValueError(f'''Constant matrix must be nx1 but received {rows2}x{cols2}''')

    if rows1 != rows2:
        raise ValueError(
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f'''received {rows1}x{cols1} and {rows2}x{cols2}''')

    if len(init_val) != rows1:
        raise ValueError(
            "Number of initial values must be equal to number of rows in coefficient "
            f'''matrix but received {len(init_val)} and {rows1}''')

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1)
    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            new_val.append((temp + val) / denom)
        init_val = new_val

    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    rows, cols = table.shape
    is_diagonally_dominant = True
    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")
    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
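# Usage sketch (assumes the repaired `jacobi_iteration_method` above):
# approximate the solution of a strictly diagonally dominant 2x2 system;
# the exact solution is roughly [1.0909, -2.3636].
if __name__ == "__main__":
    coefficient = np.array([[4.0, 1.0], [1.0, 3.0]])
    constant = np.array([[2.0], [-6.0]])
    print(jacobi_iteration_method(coefficient, constant, init_val=[0.5, -0.5], iterations=25))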
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    """Speed of sound (m/s) in a fluid: c = sqrt(bulk_modulus / density)."""
    if density <= 0:
        raise ValueError('Impossible fluid density')
    if bulk_modulus <= 0:
        raise ValueError('Impossible bulk modulus')
    return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
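# Usage sketch (illustrative values only): speed of sound in water near 20 °C,
# taking density ~998 kg/m^3 and bulk modulus ~2.15 GPa; the result is roughly
# 1.47e3 m/s.
if __name__ == "__main__":
    print(speed_of_sound_in_a_fluid(density=998.0, bulk_modulus=2.15e9))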
"""simple docstring"""
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class __a :
'''simple docstring'''
_SCREAMING_SNAKE_CASE :Optional[Union[str, Path]] = None
_SCREAMING_SNAKE_CASE :bool = False
_SCREAMING_SNAKE_CASE :bool = False
_SCREAMING_SNAKE_CASE :bool = False
_SCREAMING_SNAKE_CASE :Optional[Dict] = None
_SCREAMING_SNAKE_CASE :Optional[str] = None
_SCREAMING_SNAKE_CASE :bool = False
_SCREAMING_SNAKE_CASE :bool = False
_SCREAMING_SNAKE_CASE :bool = False
_SCREAMING_SNAKE_CASE :bool = True
_SCREAMING_SNAKE_CASE :Optional[int] = None
_SCREAMING_SNAKE_CASE :int = 1
_SCREAMING_SNAKE_CASE :Optional[Union[str, bool]] = None
_SCREAMING_SNAKE_CASE :bool = False
_SCREAMING_SNAKE_CASE :Optional[Dict] = None
_SCREAMING_SNAKE_CASE :Optional[str] = None
    def copy(self) -> "DownloadConfig":
        """simple docstring"""
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
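# Usage sketch (a minimal illustration; the dataclass field names above are
# elided, so only the no-argument constructor and the `copy` method are used):
if __name__ == "__main__":
    config = DownloadConfig()
    clone = config.copy()
    assert clone is not config and clone.__dict__ == config.__dict__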
import os
from distutils.util import strtobool
def get_int_from_env(env_keys, default):
    '''Returns the first positive env value found in the `env_keys` list or the default.'''
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default


def parse_flag_from_env(key, default=False):
    '''Returns the boolean value of `key` in the environment, or `default` if unset.'''
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...


def parse_choice_from_env(key, default="no"):
    '''Returns the raw string value of `key` in the environment, or `default` if unset.'''
    value = os.environ.get(key, str(default))
    return value
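# Usage sketch (assumes the repaired helpers above; the environment variable
# names here are illustrative only):
if __name__ == "__main__":
    os.environ["MY_DEBUG"] = "1"
    print(parse_flag_from_env("MY_DEBUG"))                      # True
    print(get_int_from_env(["WORLD_SIZE", "SLURM_NTASKS"], 1))  # 1 unless one is set
    print(parse_choice_from_env("MY_MODE"))                     # "no" unless set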
"""simple docstring"""
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
a :Optional[Any] = "<<<<<<< This should probably be modified because it mentions: "
a :Tuple = "=======\n>>>>>>>\n"
a :str = [
"TextEncoderConfig",
"ByteTextEncoder",
"SubwordTextEncoder",
"encoder_config",
"maybe_build_from_corpus",
"manual_dir",
]
TO_CONVERT = [
# (pattern, replacement)
# Order is important here for some replacements
(r"tfds\.core", r"datasets"),
(r"tf\.io\.gfile\.GFile", r"open"),
(r"tf\.([\w\d]+)", r"datasets.Value('\1')"),
(r"tfds\.features\.Text\(\)", r"datasets.Value('string')"),
(r"tfds\.features\.Text\(", r"datasets.Value('string'),"),
(r"features\s*=\s*tfds.features.FeaturesDict\(", r"features=datasets.Features("),
(r"tfds\.features\.FeaturesDict\(", r"dict("),
(r"The TensorFlow Datasets Authors", r"The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"),
(r"tfds\.", r"datasets."),
(r"dl_manager\.manual_dir", r"self.config.data_dir"),
(r"self\.builder_config", r"self.config"),
]
def convert_command_factory(args: Namespace):
    """Factory returning a ConvertCommand from the parsed CLI arguments."""
    return ConvertCommand(args.tfds_path, args.datasets_directory)
class __a (UpperCamelCase_):
'''simple docstring'''
@staticmethod
    def register_subcommand(parser: ArgumentParser):
        """simple docstring"""
        train_parser = parser.add_parser(
            """convert""" , help="""Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.""" , )
        train_parser.add_argument(
            """--tfds_path""" , type=str , required=True , help="""Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.""" , )
        train_parser.add_argument(
            """--datasets_directory""" , type=str , required=True , help="""Path to the HuggingFace Datasets folder.""" )
        train_parser.set_defaults(func=convert_command_factory)
    def __init__(self, tfds_path: str, datasets_directory: str, *args):
        """simple docstring"""
        self._logger = get_logger("""datasets-cli/converting""")
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory
    def run(self):
"""simple docstring"""
if os.path.isdir(self._tfds_path ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
SCREAMING_SNAKE_CASE__ : Tuple = os.path.dirname(self._tfds_path )
else:
raise ValueError("""--tfds_path is neither a directory nor a file. Please check path.""" )
SCREAMING_SNAKE_CASE__ : Dict = os.path.abspath(self._datasets_directory )
self._logger.info(f'''Converting datasets from {abs_tfds_path} to {abs_datasets_path}''' )
SCREAMING_SNAKE_CASE__ : str = []
SCREAMING_SNAKE_CASE__ : str = []
SCREAMING_SNAKE_CASE__ : List[Any] = {}
if os.path.isdir(self._tfds_path ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = os.listdir(_a )
else:
SCREAMING_SNAKE_CASE__ : List[Any] = [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(f'''Looking at file {f_name}''' )
SCREAMING_SNAKE_CASE__ : int = os.path.join(_a , _a )
SCREAMING_SNAKE_CASE__ : Dict = os.path.join(_a , _a )
if not os.path.isfile(_a ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info("""Skipping file""" )
continue
with open(_a , encoding="""utf-8""" ) as f:
SCREAMING_SNAKE_CASE__ : List[str] = f.readlines()
SCREAMING_SNAKE_CASE__ : Optional[int] = []
SCREAMING_SNAKE_CASE__ : str = False
SCREAMING_SNAKE_CASE__ : Optional[int] = False
SCREAMING_SNAKE_CASE__ : Dict = []
for line in lines:
SCREAMING_SNAKE_CASE__ : List[str] = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
SCREAMING_SNAKE_CASE__ : List[Any] = """import datasets\n"""
elif "import tensorflow" in out_line:
# order is important here
SCREAMING_SNAKE_CASE__ : Optional[Any] = """"""
continue
elif "from absl import logging" in out_line:
SCREAMING_SNAKE_CASE__ : Any = """from datasets import logging\n"""
elif "getLogger" in out_line:
SCREAMING_SNAKE_CASE__ : Optional[int] = out_line.replace("""getLogger""" , """get_logger""" )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = True
SCREAMING_SNAKE_CASE__ : Tuple = list(filter(lambda _a : e in out_line , _a ) )
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(_a ) + """\n""" )
out_lines.append(_a )
out_lines.append(_a )
continue
else:
for pattern, replacement in TO_CONVERT:
SCREAMING_SNAKE_CASE__ : int = re.sub(_a , _a , _a )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
SCREAMING_SNAKE_CASE__ : Dict = re.match(r"""from\stensorflow_datasets.*import\s([^\.\r\n]+)""" , _a )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(""",""" ) )
SCREAMING_SNAKE_CASE__ : Dict = """from . import """ + match.group(1 )
# Check we have not forget anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(f'''Error converting {out_line.strip()}''' )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = True
out_lines.append(_a )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
SCREAMING_SNAKE_CASE__ : Union[str, Any] = f_name.replace(""".py""" , """""" )
SCREAMING_SNAKE_CASE__ : List[str] = os.path.join(_a , _a )
SCREAMING_SNAKE_CASE__ : Tuple = os.path.join(_a , _a )
os.makedirs(_a , exist_ok=_a )
self._logger.info(f'''Adding directory {output_dir}''' )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(_a )
if needs_manual_update:
with_manual_update.append(_a )
with open(_a , """w""" , encoding="""utf-8""" ) as f:
f.writelines(_a )
self._logger.info(f'''Converted in {output_file}''' )
for utils_file in utils_files:
try:
SCREAMING_SNAKE_CASE__ : str = os.path.basename(_a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = imports_to_builder_map[f_name.replace(""".py""" , """""" )]
self._logger.info(f'''Moving {dest_folder} to {utils_file}''' )
shutil.copy(_a , _a )
except KeyError:
self._logger.error(f'''Cannot find destination folder for {utils_file}. Please copy manually.''' )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
f'''You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.''' )
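# Usage sketch (assumption: this command is wired into the `datasets-cli`
# entry point, as in the upstream project):
#
#   datasets-cli convert --tfds_path ./my_tfds_dataset.py --datasets_directory ./hf_datasets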
"""simple docstring"""
from collections import deque
from math import floor
from random import random
from time import time
class __A :
def __init__( self : Dict ) -> Optional[Any]:
__magic_name__: Dict = {}
def lowerCamelCase__ ( self : str , __snake_case : Any , __snake_case : List[str] , __snake_case : Optional[Any]=1 ) -> Optional[Any]:
if self.graph.get(_a ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
__magic_name__: List[Any] = [[w, v]]
if not self.graph.get(_a ):
__magic_name__: Optional[int] = []
def lowerCamelCase__ ( self : Tuple ) -> List[Any]:
return list(self.graph )
def lowerCamelCase__ ( self : str , __snake_case : Tuple , __snake_case : List[Any] ) -> Dict:
if self.graph.get(_a ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(_a )
def lowerCamelCase__ ( self : Tuple , __snake_case : List[str]=-2 , __snake_case : str=-1 ) -> Dict:
if s == d:
return []
__magic_name__: Dict = []
__magic_name__: Dict = []
if s == -2:
__magic_name__: List[Any] = list(self.graph )[0]
stack.append(_a )
visited.append(_a )
__magic_name__: Dict = s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
__magic_name__: List[str] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(_a )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
__magic_name__: List[str] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(_a ) != 0:
__magic_name__: Optional[int] = stack[len(_a ) - 1]
else:
__magic_name__: Dict = ss
# check if we have reached the starting point
if len(_a ) == 0:
return visited
def lowerCamelCase__ ( self : str , __snake_case : Optional[Any]=-1 ) -> Optional[Any]:
if c == -1:
__magic_name__: Optional[int] = floor(random() * 10000 ) + 10
for i in range(_a ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
__magic_name__: Dict = floor(random() * c ) + 1
if n != i:
self.add_pair(_a , _a , 1 )
def lowerCamelCase__ ( self : Union[str, Any] , __snake_case : Any=-2 ) -> Tuple:
__magic_name__: Dict = deque()
__magic_name__: Optional[Any] = []
if s == -2:
__magic_name__: Any = list(self.graph )[0]
d.append(_a )
visited.append(_a )
while d:
__magic_name__: Tuple = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def lowerCamelCase__ ( self : Tuple , __snake_case : Optional[Any] ) -> List[Any]:
__magic_name__: Dict = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def lowerCamelCase__ ( self : int , __snake_case : Optional[int] ) -> Tuple:
return len(self.graph[u] )
def lowerCamelCase__ ( self : str , __snake_case : Optional[Any]=-2 ) -> int:
__magic_name__: int = []
__magic_name__: Union[str, Any] = []
if s == -2:
__magic_name__: Optional[Any] = list(self.graph )[0]
stack.append(_a )
visited.append(_a )
__magic_name__: str = s
__magic_name__: Union[str, Any] = []
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
__magic_name__: str = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
__magic_name__: Union[str, Any] = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(_a ) != 0:
__magic_name__: Tuple = stack[len(_a ) - 1]
else:
__magic_name__: List[str] = ss
# check if se have reached the starting point
if len(_a ) == 0:
return sorted_nodes
def lowerCamelCase__ ( self : str ) -> Dict:
__magic_name__: List[Any] = []
__magic_name__: List[Any] = []
__magic_name__: Optional[Any] = list(self.graph )[0]
stack.append(_a )
visited.append(_a )
__magic_name__: Union[str, Any] = -2
__magic_name__: Union[str, Any] = []
__magic_name__: List[Any] = s
__magic_name__: Optional[Any] = False
__magic_name__: Tuple = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
__magic_name__: List[Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
__magic_name__: str = len(_a ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
__magic_name__: Optional[Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
__magic_name__: Optional[Any] = True
if len(_a ) != 0:
__magic_name__: Optional[int] = stack[len(_a ) - 1]
else:
__magic_name__: int = False
indirect_parents.append(_a )
__magic_name__: str = s
__magic_name__: Optional[Any] = ss
# check if we have reached the starting point
if len(_a ) == 0:
return list(_a )
def lowerCamelCase__ ( self : Dict ) -> int:
__magic_name__: List[Any] = []
__magic_name__: Optional[Any] = []
__magic_name__: Optional[Any] = list(self.graph )[0]
stack.append(_a )
visited.append(_a )
__magic_name__: Any = -2
__magic_name__: Any = []
__magic_name__: Optional[Any] = s
__magic_name__: Optional[int] = False
__magic_name__: str = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
__magic_name__: Any = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
__magic_name__: int = len(_a ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
__magic_name__: Tuple = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
__magic_name__: int = True
if len(_a ) != 0:
__magic_name__: List[str] = stack[len(_a ) - 1]
else:
__magic_name__: str = False
indirect_parents.append(_a )
__magic_name__: Union[str, Any] = s
__magic_name__: List[str] = ss
# check if we have reached the starting point
if len(_a ) == 0:
return False
def lowerCamelCase__ ( self : Union[str, Any] , __snake_case : Optional[Any]=-2 , __snake_case : Optional[int]=-1 ) -> Optional[int]:
__magic_name__: Optional[Any] = time()
self.dfs(_a , _a )
__magic_name__: List[str] = time()
return end - begin
def lowerCamelCase__ ( self : List[Any] , __snake_case : int=-2 ) -> str:
__magic_name__: List[str] = time()
self.bfs(_a )
__magic_name__: Optional[Any] = time()
return end - begin
class __A :
def __init__( self : int ) -> Union[str, Any]:
__magic_name__: List[Any] = {}
def lowerCamelCase__ ( self : List[Any] , __snake_case : Optional[Any] , __snake_case : List[Any] , __snake_case : Optional[int]=1 ) -> List[str]:
if self.graph.get(_a ):
# if there already is an edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
__magic_name__: str = [[w, v]]
# add the other way
if self.graph.get(_a ):
# if there already is an edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
# if v does not exist
__magic_name__: str = [[w, u]]
def lowerCamelCase__ ( self : Optional[int] , __snake_case : Any , __snake_case : Optional[Any] ) -> Optional[int]:
if self.graph.get(_a ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(_a )
# the other way round
if self.graph.get(_a ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(_a )
def lowerCamelCase__ ( self : int , __snake_case : List[str]=-2 , __snake_case : Tuple=-1 ) -> Dict:
if s == d:
return []
__magic_name__: Optional[Any] = []
__magic_name__: Any = []
if s == -2:
__magic_name__: Any = list(self.graph )[0]
stack.append(_a )
visited.append(_a )
__magic_name__: List[str] = s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
__magic_name__: List[str] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(_a )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
__magic_name__: Tuple = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(_a ) != 0:
__magic_name__: Optional[Any] = stack[len(_a ) - 1]
else:
__magic_name__: List[Any] = ss
# check if we have reached the starting point
if len(_a ) == 0:
return visited
def lowerCamelCase__ ( self : Any , __snake_case : Dict=-1 ) -> Any:
if c == -1:
__magic_name__: List[str] = floor(random() * 10000 ) + 10
for i in range(_a ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
__magic_name__: Dict = floor(random() * c ) + 1
if n != i:
self.add_pair(_a , _a , 1 )
def lowerCamelCase__ ( self : Tuple , __snake_case : Any=-2 ) -> List[str]:
__magic_name__: Union[str, Any] = deque()
__magic_name__: Union[str, Any] = []
if s == -2:
__magic_name__: Optional[Any] = list(self.graph )[0]
d.append(_a )
visited.append(_a )
while d:
__magic_name__: Dict = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def lowerCamelCase__ ( self : int , __snake_case : Tuple ) -> Union[str, Any]:
return len(self.graph[u] )
def lowerCamelCase__ ( self : Optional[int] ) -> int:
__magic_name__: List[Any] = []
__magic_name__: Any = []
__magic_name__: List[str] = list(self.graph )[0]
stack.append(_a )
visited.append(_a )
__magic_name__: List[Any] = -2
__magic_name__: Any = []
__magic_name__: Union[str, Any] = s
__magic_name__: Optional[int] = False
__magic_name__: Optional[int] = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
__magic_name__: str = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
__magic_name__: Optional[int] = len(_a ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
__magic_name__: Union[str, Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
__magic_name__: Any = True
if len(_a ) != 0:
__magic_name__: Dict = stack[len(_a ) - 1]
else:
__magic_name__: Tuple = False
indirect_parents.append(_a )
__magic_name__: Optional[Any] = s
__magic_name__: str = ss
# check if we have reached the starting point
if len(_a ) == 0:
return list(_a )
def lowerCamelCase__ ( self : Optional[Any] ) -> Optional[Any]:
__magic_name__: Any = []
__magic_name__: Optional[Any] = []
__magic_name__: Optional[Any] = list(self.graph )[0]
stack.append(_a )
visited.append(_a )
__magic_name__: List[Any] = -2
__magic_name__: List[str] = []
__magic_name__: Union[str, Any] = s
__magic_name__: List[Any] = False
__magic_name__: str = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
__magic_name__: str = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
__magic_name__: Union[str, Any] = len(_a ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
__magic_name__: Optional[Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
__magic_name__: int = True
if len(_a ) != 0:
__magic_name__: Union[str, Any] = stack[len(_a ) - 1]
else:
__magic_name__: List[Any] = False
indirect_parents.append(_a )
__magic_name__: Optional[Any] = s
__magic_name__: Optional[int] = ss
# check if we have reached the starting point
if len(_a ) == 0:
return False
def lowerCamelCase__ ( self : List[str] ) -> List[Any]:
return list(self.graph )
def lowerCamelCase__ ( self : Optional[int] , __snake_case : Optional[int]=-2 , __snake_case : Tuple=-1 ) -> Optional[int]:
__magic_name__: Union[str, Any] = time()
self.dfs(_a , _a )
__magic_name__: Dict = time()
return end - begin
def lowerCamelCase__ ( self : Dict , __snake_case : Dict=-2 ) -> Any:
__magic_name__: Optional[Any] = time()
self.bfs(_a )
__magic_name__: Any = time()
return end - begin
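# Usage sketch (an assumption-heavy illustration: the collided method names
# above are read as their likely upstream names `add_pair`, `dfs`, `bfs`, and
# the first class is taken to be a directed graph named `Graph`):
#
#   g = Graph()
#   g.add_pair(0, 1)
#   g.add_pair(1, 2)
#   g.add_pair(0, 2)
#   print(g.dfs())   # e.g. [0, 1, 2]
#   print(g.bfs(0))  # e.g. [0, 1, 2]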
"""simple docstring"""
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
EQUATORIAL_RADIUS = 6378137


def lamberts_ellipsoidal_distance(
    lat1: float, lon1: float, lat2: float, lon2: float
) -> float:
    # Equation parameter: flattening of the ellipsoid
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))

    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS

    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2

    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_denominator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_denominator)

    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)

    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
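# Usage sketch (assumes the repaired `lamberts_ellipsoidal_distance` above):
# distance between San Francisco (37.774856, -122.424227) and Yosemite
# (37.864742, -119.537521), roughly 254 km.
if __name__ == "__main__":
    print(lamberts_ellipsoidal_distance(37.774856, -122.424227, 37.864742, -119.537521))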
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __magic_name__ :
def __init__( self : Dict , snake_case_ : str , snake_case_ : int=13 , snake_case_ : Union[str, Any]=32 , snake_case_ : Optional[Any]=3 , snake_case_ : Dict=4 , snake_case_ : Any=[10, 20, 30, 40] , snake_case_ : Tuple=[2, 2, 3, 2] , snake_case_ : int=True , snake_case_ : List[str]=True , snake_case_ : List[str]=37 , snake_case_ : Union[str, Any]="gelu" , snake_case_ : Dict=10 , snake_case_ : Optional[Any]=0.02 , snake_case_ : Any=["stage2", "stage3", "stage4"] , snake_case_ : Union[str, Any]=3 , snake_case_ : List[str]=None , ):
__snake_case = parent
__snake_case = batch_size
__snake_case = image_size
__snake_case = num_channels
__snake_case = num_stages
__snake_case = hidden_sizes
__snake_case = depths
__snake_case = is_training
__snake_case = use_labels
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = type_sequence_label_size
__snake_case = initializer_range
__snake_case = out_features
__snake_case = num_labels
__snake_case = scope
__snake_case = num_stages
def lowerCAmelCase ( self : int ):
__snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case = None
if self.use_labels:
__snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__snake_case = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase ( self : int ):
return ConvNextConfig(
num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , )
def lowerCAmelCase ( self : int ):
return UperNetConfig(
backbone_config=self.get_backbone_config() , hidden_size=512 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=_a , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=_a , loss_ignore_index=255 , num_labels=self.num_labels , )
def lowerCAmelCase ( self : str , snake_case_ : Dict , snake_case_ : List[Any] , snake_case_ : List[str] ):
__snake_case = UperNetForSemanticSegmentation(config=_a )
model.to(_a )
model.eval()
__snake_case = model(_a )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def lowerCAmelCase ( self : Optional[int] ):
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class __magic_name__ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
_SCREAMING_SNAKE_CASE : int = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
_SCREAMING_SNAKE_CASE : List[Any] = {"""image-segmentation""": UperNetForSemanticSegmentation} if is_torch_available() else {}
_SCREAMING_SNAKE_CASE : Optional[Any] = False
_SCREAMING_SNAKE_CASE : int = False
_SCREAMING_SNAKE_CASE : Tuple = False
_SCREAMING_SNAKE_CASE : List[Any] = False
_SCREAMING_SNAKE_CASE : int = False
_SCREAMING_SNAKE_CASE : Optional[int] = False
def lowerCAmelCase ( self : str ):
__snake_case = UperNetModelTester(self )
__snake_case = ConfigTester(self , config_class=_a , has_text_modality=_a , hidden_size=37 )
def lowerCAmelCase ( self : Optional[Any] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCAmelCase ( self : List[str] ):
return
def lowerCAmelCase ( self : Union[str, Any] ):
__snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = model_class(_a )
__snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case = [*signature.parameters.keys()]
__snake_case = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _a )
def lowerCAmelCase ( self : List[Any] ):
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_a )
@unittest.skip(reason="UperNet does not use inputs_embeds" )
def lowerCAmelCase ( self : Optional[Any] ):
pass
@unittest.skip(reason="UperNet does not support input and output embeddings" )
def lowerCAmelCase ( self : int ):
pass
@unittest.skip(reason="UperNet does not have a base model" )
def lowerCAmelCase ( self : List[str] ):
pass
@unittest.skip(reason="UperNet does not have a base model" )
def lowerCAmelCase ( self : Optional[int] ):
pass
@require_torch_multi_gpu
@unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def lowerCAmelCase ( self : List[Any] ):
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowerCAmelCase ( self : Any ):
pass
def lowerCAmelCase ( self : Optional[int] ):
def check_hidden_states_output(snake_case_ : Union[str, Any] , snake_case_ : Optional[Any] , snake_case_ : str ):
__snake_case = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
__snake_case = model(**self._prepare_for_class(_a , _a ) )
__snake_case = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__snake_case = self.model_tester.num_stages
self.assertEqual(len(_a ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
__snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = True
check_hidden_states_output(_a , _a , _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case = True
check_hidden_states_output(_a , _a , _a )
def lowerCAmelCase ( self : Tuple ):
__snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = _config_zero_init(_a )
__snake_case = _config_zero_init(configs_no_init.backbone_config )
for model_class in self.all_model_classes:
__snake_case = model_class(config=_a )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip(reason="UperNet does not have tied weights" )
def lowerCAmelCase ( self : str ):
pass
@slow
def lowerCAmelCase ( self : Union[str, Any] ):
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case = UperNetForSemanticSegmentation.from_pretrained(_a )
self.assertIsNotNone(_a )
def __UpperCamelCase ( ) -> Optional[Any]:
"""simple docstring"""
__snake_case = hf_hub_download(
repo_id="hf-internal-testing/fixtures_ade20k" , repo_type="dataset" , filename="ADE_val_00000001.jpg" )
__snake_case = Image.open(__lowerCAmelCase ).convert("RGB" )
return image
@require_torch
@require_vision
@slow
class __magic_name__ ( unittest.TestCase ):
def lowerCAmelCase ( self : Optional[Any] ):
__snake_case = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny" )
__snake_case = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny" ).to(_a )
__snake_case = prepare_img()
__snake_case = processor(images=_a , return_tensors="pt" ).to(_a )
with torch.no_grad():
__snake_case = model(**_a )
__snake_case = torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape , _a )
__snake_case = torch.tensor(
[[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , _a , atol=1e-4 ) )
def lowerCAmelCase ( self : Dict ):
__snake_case = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny" )
__snake_case = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny" ).to(_a )
__snake_case = prepare_img()
__snake_case = processor(images=_a , return_tensors="pt" ).to(_a )
with torch.no_grad():
__snake_case = model(**_a )
__snake_case = torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape , _a )
__snake_case = torch.tensor(
[[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , _a , atol=1e-4 ) )
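# Usage sketch outside the test harness (the checkpoint names appear in the
# tests above; `image` is any PIL RGB image):
#
#   from transformers import AutoImageProcessor, UperNetForSemanticSegmentation
#   processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
#   model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
#   inputs = processor(images=image, return_tensors="pt")
#   logits = model(**inputs).logits   # (1, num_labels, 512, 512)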
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.weight', f'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.bias', f'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'encoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'encoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'encoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm1.weight', f'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight',
f'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias',
f'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'decoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'decoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'decoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm1.weight', f'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.weight', f'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.bias', f'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'decoder.layers.{i}.final_layer_norm.bias'))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.encoder.norm.weight", "encoder.layernorm.weight"),
("transformer.encoder.norm.bias", "encoder.layernorm.bias"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("""backbone.0.body""" , """backbone.conv_encoder.model""")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def _lowercase ( __lowerCAmelCase ) -> int:
SCREAMING_SNAKE_CASE__ : str = """"""
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
SCREAMING_SNAKE_CASE__ : Any = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
SCREAMING_SNAKE_CASE__ : int = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE__ : int = in_proj_weight[:256, :]
SCREAMING_SNAKE_CASE__ : Any = in_proj_bias[:256]
SCREAMING_SNAKE_CASE__ : Dict = in_proj_weight[256:512, :]
SCREAMING_SNAKE_CASE__ : List[str] = in_proj_bias[256:512]
SCREAMING_SNAKE_CASE__ : int = in_proj_weight[-256:, :]
SCREAMING_SNAKE_CASE__ : List[Any] = in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
SCREAMING_SNAKE_CASE__ : List[str] = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight''' )
SCREAMING_SNAKE_CASE__ : Tuple = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE__ : Any = in_proj_weight[:256, :]
SCREAMING_SNAKE_CASE__ : List[str] = in_proj_bias[:256]
SCREAMING_SNAKE_CASE__ : Optional[Any] = in_proj_weight[256:512, :]
SCREAMING_SNAKE_CASE__ : Tuple = in_proj_bias[256:512]
SCREAMING_SNAKE_CASE__ : Optional[int] = in_proj_weight[-256:, :]
SCREAMING_SNAKE_CASE__ : Dict = in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
SCREAMING_SNAKE_CASE__ : Optional[Any] = state_dict.pop(
F'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight''' )
SCREAMING_SNAKE_CASE__ : List[Any] = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) of cross-attention to the state dict
SCREAMING_SNAKE_CASE__ : int = in_proj_weight_cross_attn[:256, :]
SCREAMING_SNAKE_CASE__ : List[str] = in_proj_bias_cross_attn[:256]
SCREAMING_SNAKE_CASE__ : Optional[Any] = in_proj_weight_cross_attn[256:512, :]
SCREAMING_SNAKE_CASE__ : Optional[int] = in_proj_bias_cross_attn[256:512]
SCREAMING_SNAKE_CASE__ : int = in_proj_weight_cross_attn[-256:, :]
SCREAMING_SNAKE_CASE__ : Dict = in_proj_bias_cross_attn[-256:]
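# The slicing above mirrors PyTorch's fused nn.MultiheadAttention layout:
# in_proj_weight stacks the query, key and value projections along dim 0, so
# for a hidden size of 256 rows 0-255 are Q, 256-511 are K and the last 256
# are V. A minimal self-contained sketch of that split (hypothetical tensors,
# not tied to this checkpoint):
import torch

_hidden = 256
_fused_w = torch.randn(3 * _hidden, _hidden)  # stacked (Q; K; V) weights
_fused_b = torch.randn(3 * _hidden)  # stacked (Q; K; V) biases
_q_w, _k_w, _v_w = _fused_w[:_hidden], _fused_w[_hidden : 2 * _hidden], _fused_w[2 * _hidden :]
_q_b, _k_b, _v_b = _fused_b[:_hidden], _fused_b[_hidden : 2 * _hidden], _fused_b[2 * _hidden :]
assert _q_w.shape == _k_w.shape == _v_w.shape == (_hidden, _hidden)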
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase ) -> Optional[int]:
    SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = image.size
SCREAMING_SNAKE_CASE__ : Optional[Any] = max(__lowerCAmelCase , __lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Dict = 800 if """detection""" in checkpoint_url else 1000
SCREAMING_SNAKE_CASE__ : List[str] = target_max_size / current_max_size
SCREAMING_SNAKE_CASE__ : str = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) )
return resized_image
def _lowercase ( __lowerCAmelCase ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Optional[int] = F.to_tensor(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : List[str] = F.normalize(__lowerCAmelCase , mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] )
return image
@torch.no_grad()
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Optional[Any]:
logger.info("""Converting model...""" )
# load original state dict
SCREAMING_SNAKE_CASE__ : str = torch.hub.load_state_dict_from_url(__lowerCAmelCase , map_location="""cpu""" )
# rename keys
for src, dest in rename_keys:
rename_key(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[int] = rename_backbone_keys(__lowerCAmelCase )
# query, key and value matrices need special treatment
read_in_q_k_v(__lowerCAmelCase )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
SCREAMING_SNAKE_CASE__ : Optional[int] = """model."""
for key in state_dict.copy().keys():
if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ):
SCREAMING_SNAKE_CASE__ : Optional[int] = state_dict.pop(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : List[str] = val
# create HuggingFace model and load state dict
SCREAMING_SNAKE_CASE__ : Tuple = TableTransformerConfig(
backbone="""resnet18""" , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , )
if "detection" in checkpoint_url:
SCREAMING_SNAKE_CASE__ : Optional[int] = 15
SCREAMING_SNAKE_CASE__ : Any = 2
SCREAMING_SNAKE_CASE__ : str = {0: """table""", 1: """table rotated"""}
SCREAMING_SNAKE_CASE__ : Union[str, Any] = idalabel
SCREAMING_SNAKE_CASE__ : List[str] = {v: k for k, v in idalabel.items()}
else:
SCREAMING_SNAKE_CASE__ : Tuple = 125
SCREAMING_SNAKE_CASE__ : str = 6
SCREAMING_SNAKE_CASE__ : List[Any] = {
0: """table""",
1: """table column""",
2: """table row""",
3: """table column header""",
4: """table projected row header""",
5: """table spanning cell""",
}
SCREAMING_SNAKE_CASE__ : Any = idalabel
SCREAMING_SNAKE_CASE__ : Dict = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE__ : Dict = DetrImageProcessor(
format="""coco_detection""" , max_size=800 if """detection""" in checkpoint_url else 1000 )
SCREAMING_SNAKE_CASE__ : Tuple = TableTransformerForObjectDetection(__lowerCAmelCase )
model.load_state_dict(__lowerCAmelCase )
model.eval()
# verify our conversion
SCREAMING_SNAKE_CASE__ : Dict = """example_pdf.png""" if """detection""" in checkpoint_url else """example_table.png"""
SCREAMING_SNAKE_CASE__ : Tuple = hf_hub_download(repo_id="""nielsr/example-pdf""" , repo_type="""dataset""" , filename=__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Any = Image.open(__lowerCAmelCase ).convert("""RGB""" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = normalize(resize(__lowerCAmelCase , __lowerCAmelCase ) ).unsqueeze(0 )
SCREAMING_SNAKE_CASE__ : Dict = model(__lowerCAmelCase )
if "detection" in checkpoint_url:
SCREAMING_SNAKE_CASE__ : List[Any] = (1, 15, 3)
SCREAMING_SNAKE_CASE__ : str = torch.tensor(
[[-6.7_897, -16.9_985, 6.7_937], [-8.0_186, -22.2_192, 6.9_677], [-7.3_117, -21.0_708, 7.4_055]] )
SCREAMING_SNAKE_CASE__ : str = torch.tensor([[0.4_867, 0.1_767, 0.6_732], [0.6_718, 0.4_479, 0.3_830], [0.4_716, 0.1_760, 0.6_364]] )
else:
SCREAMING_SNAKE_CASE__ : Dict = (1, 125, 7)
SCREAMING_SNAKE_CASE__ : Any = torch.tensor(
[[-18.1_430, -8.3_214, 4.8_274], [-18.4_685, -7.1_361, -4.2_667], [-26.3_693, -9.3_429, -4.9_962]] )
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.tensor([[0.4_983, 0.5_595, 0.9_440], [0.4_916, 0.6_315, 0.5_954], [0.6_108, 0.8_637, 0.1_135]] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, :3, :3] , __lowerCAmelCase , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , __lowerCAmelCase , atol=1E-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase )
model.save_pretrained(__lowerCAmelCase )
image_processor.save_pretrained(__lowerCAmelCase )
if push_to_hub:
# Push model to HF hub
logger.info("""Pushing model to the hub...""" )
SCREAMING_SNAKE_CASE__ : List[Any] = (
"""microsoft/table-transformer-detection"""
if """detection""" in checkpoint_url
else """microsoft/table-transformer-structure-recognition"""
)
model.push_to_hub(__lowerCAmelCase )
image_processor.push_to_hub(__lowerCAmelCase )
if __name__ == "__main__":
a :Any = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
type=str,
choices=[
"https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
"https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth",
],
help="URL of the Table Transformer checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
a :int = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 680 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"google/mobilenet_v2_1.4_224": "https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json",
"google/mobilenet_v2_1.0_224": "https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json",
"google/mobilenet_v2_0.75_160": "https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json",
"google/mobilenet_v2_0.35_96": "https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json",
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class __lowercase ( UpperCamelCase_ ):
_a = """mobilenet_v2"""
def __init__( self , UpperCamelCase=3 , UpperCamelCase=224 , UpperCamelCase=1.0 , UpperCamelCase=8 , UpperCamelCase=8 , UpperCamelCase=6 , UpperCamelCase=32 , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase="relu6" , UpperCamelCase=True , UpperCamelCase=0.8 , UpperCamelCase=0.02 , UpperCamelCase=0.001 , UpperCamelCase=255 , **UpperCamelCase , ) -> int:
super().__init__(**_a )
if depth_multiplier <= 0:
raise ValueError('depth_multiplier must be greater than zero.' )
__a = num_channels
__a = image_size
__a = depth_multiplier
__a = depth_divisible_by
__a = min_depth
__a = expand_ratio
__a = output_stride
__a = first_layer_is_expansion
__a = finegrained_output
__a = hidden_act
__a = tf_padding
__a = classifier_dropout_prob
__a = initializer_range
__a = layer_norm_eps
__a = semantic_loss_ignore_index
class __lowercase ( UpperCamelCase_ ):
_a = version.parse("""1.11""" )
@property
def UpperCamelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict([('pixel_values', {0: 'batch'})] )
@property
def UpperCamelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "image-classification":
return OrderedDict([('logits', {0: 'batch'})] )
else:
return OrderedDict([('last_hidden_state', {0: 'batch'}), ('pooler_output', {0: 'batch'})] )
@property
def UpperCamelCase__ ( self ) -> float:
return 1e-4
| 539 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class __a :
'''simple docstring'''
def __init__( self , _a , _a=13 , _a=7 , _a=True , _a=True , _a=True , _a=True , _a=99 , _a=32 , _a=2 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=16 , _a=2 , _a=0.02 , _a=3 , _a=4 , _a=None , _a=0 , ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = parent
SCREAMING_SNAKE_CASE__ : Union[str, Any] = batch_size
SCREAMING_SNAKE_CASE__ : str = seq_length
SCREAMING_SNAKE_CASE__ : List[str] = is_training
SCREAMING_SNAKE_CASE__ : List[str] = use_input_mask
SCREAMING_SNAKE_CASE__ : Dict = use_token_type_ids
SCREAMING_SNAKE_CASE__ : int = use_labels
SCREAMING_SNAKE_CASE__ : Union[str, Any] = vocab_size
SCREAMING_SNAKE_CASE__ : Dict = hidden_size
SCREAMING_SNAKE_CASE__ : Dict = num_hidden_layers
SCREAMING_SNAKE_CASE__ : Tuple = num_attention_heads
SCREAMING_SNAKE_CASE__ : Dict = intermediate_size
SCREAMING_SNAKE_CASE__ : int = hidden_act
SCREAMING_SNAKE_CASE__ : str = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : str = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : List[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE__ : Any = type_vocab_size
SCREAMING_SNAKE_CASE__ : int = type_sequence_label_size
SCREAMING_SNAKE_CASE__ : str = initializer_range
SCREAMING_SNAKE_CASE__ : Any = num_labels
SCREAMING_SNAKE_CASE__ : Dict = num_choices
SCREAMING_SNAKE_CASE__ : Any = scope
SCREAMING_SNAKE_CASE__ : int = projection_dim
def _a ( self ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ : str = None
if self.use_input_mask:
# follow test_modeling_tf_ctrl.py
SCREAMING_SNAKE_CASE__ : str = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE__ : Optional[int] = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE__ : str = None
SCREAMING_SNAKE_CASE__ : Dict = None
SCREAMING_SNAKE_CASE__ : Optional[int] = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE__ : List[Any] = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE__ : Any = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_a , initializer_range=self.initializer_range , )
SCREAMING_SNAKE_CASE__ : str = DPRConfig(projection_dim=self.projection_dim , **config.to_dict() )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _a ( self , _a , _a , _a , _a , _a , _a , _a ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = TFDPRContextEncoder(config=_a )
SCREAMING_SNAKE_CASE__ : Tuple = model(_a , attention_mask=_a , token_type_ids=_a )
SCREAMING_SNAKE_CASE__ : Tuple = model(_a , token_type_ids=_a )
SCREAMING_SNAKE_CASE__ : str = model(_a )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
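        # Note: projection_dim defaults to 0 here, so the `or` falls back to
        # hidden_size whenever no projection layer is configured.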
def _a ( self , _a , _a , _a , _a , _a , _a , _a ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = TFDPRQuestionEncoder(config=_a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(_a , attention_mask=_a , token_type_ids=_a )
SCREAMING_SNAKE_CASE__ : List[str] = model(_a , token_type_ids=_a )
SCREAMING_SNAKE_CASE__ : Optional[int] = model(_a )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def _a ( self , _a , _a , _a , _a , _a , _a , _a ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = TFDPRReader(config=_a )
SCREAMING_SNAKE_CASE__ : Tuple = model(_a , attention_mask=_a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,) )
def _a ( self ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = self.prepare_config_and_inputs()
        (
            SCREAMING_SNAKE_CASE__,
            SCREAMING_SNAKE_CASE__,
            SCREAMING_SNAKE_CASE__,
            SCREAMING_SNAKE_CASE__,
            SCREAMING_SNAKE_CASE__,
            SCREAMING_SNAKE_CASE__,
            SCREAMING_SNAKE_CASE__,
        ) = config_and_inputs
SCREAMING_SNAKE_CASE__ : int = {"""input_ids""": input_ids}
return config, inputs_dict
@require_tf
class __a (UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :Union[str, Any] = (
(
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
if is_tf_available()
else ()
)
_SCREAMING_SNAKE_CASE :int = {"""feature-extraction""": TFDPRQuestionEncoder} if is_tf_available() else {}
_SCREAMING_SNAKE_CASE :Optional[Any] = False
_SCREAMING_SNAKE_CASE :List[Any] = False
_SCREAMING_SNAKE_CASE :List[Any] = False
_SCREAMING_SNAKE_CASE :Optional[Any] = False
_SCREAMING_SNAKE_CASE :Dict = False
def _a ( self ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = TFDPRModelTester(self )
SCREAMING_SNAKE_CASE__ : List[str] = ConfigTester(self , config_class=_a , hidden_size=37 )
def _a ( self ) -> List[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def _a ( self ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_context_encoder(*_a )
def _a ( self ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_question_encoder(*_a )
def _a ( self ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_reader(*_a )
@slow
def _a ( self ) -> Union[str, Any]:
"""simple docstring"""
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ : List[Any] = TFDPRContextEncoder.from_pretrained(_a )
self.assertIsNotNone(_a )
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ : Optional[int] = TFDPRContextEncoder.from_pretrained(_a )
self.assertIsNotNone(_a )
for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = TFDPRQuestionEncoder.from_pretrained(_a )
self.assertIsNotNone(_a )
for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ : List[Any] = TFDPRReader.from_pretrained(_a )
self.assertIsNotNone(_a )
@require_tf
class __a (unittest.TestCase):
'''simple docstring'''
@slow
def _a ( self ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = TFDPRQuestionEncoder.from_pretrained("""facebook/dpr-question_encoder-single-nq-base""" )
SCREAMING_SNAKE_CASE__ : List[Any] = tf.constant(
[[101, 7_592, 1_010, 2_003, 2_026, 3_899, 10_140, 1_029, 102]] ) # [CLS] hello, is my dog cute? [SEP]
SCREAMING_SNAKE_CASE__ : Tuple = model(_a )[0] # embedding shape = (1, 768)
# compare the actual values for a slice.
SCREAMING_SNAKE_CASE__ : Any = tf.constant(
[
[
0.03_236_253,
0.12_753_335,
0.16_818_509,
0.00_279_786,
0.3_896_933,
0.24_264_945,
0.2_178_971,
-0.02_335_227,
-0.08_481_959,
-0.14_324_117,
]
] )
self.assertTrue(numpy.allclose(output[:, :10].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 680 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyVaaPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __a ( UpperCamelCase_ , unittest.TestCase ):
_lowerCAmelCase : Dict = KandinskyVaaPipeline
_lowerCAmelCase : List[Any] = [
"""image_embeds""",
"""negative_image_embeds""",
]
_lowerCAmelCase : Any = ["""image_embeds""", """negative_image_embeds"""]
_lowerCAmelCase : List[Any] = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
_lowerCAmelCase : List[Any] = False
@property
def __lowercase ( self : List[str] ):
'''simple docstring'''
return 32
@property
def __lowercase ( self : List[Any] ):
'''simple docstring'''
return 32
@property
def __lowercase ( self : List[str] ):
'''simple docstring'''
return self.time_input_dim
@property
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
return 1_00
@property
def __lowercase ( self : List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCamelCase__ : Union[str, Any] = {
"""in_channels""": 4,
            # out_channels is double in_channels because the model predicts both mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
UpperCamelCase__ : List[str] = UNetaDConditionModel(**_a )
return model
@property
def __lowercase ( self : int ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __lowercase ( self : int ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCamelCase__ : str = VQModel(**self.dummy_movq_kwargs )
return model
def __lowercase ( self : Dict ):
'''simple docstring'''
UpperCamelCase__ : List[str] = self.dummy_unet
UpperCamelCase__ : List[Any] = self.dummy_movq
UpperCamelCase__ : str = DDIMScheduler(
num_train_timesteps=10_00 , beta_schedule="linear" , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , clip_sample=_a , set_alpha_to_one=_a , steps_offset=1 , prediction_type="epsilon" , thresholding=_a , )
UpperCamelCase__ : List[str] = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def __lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Tuple=0 ):
'''simple docstring'''
UpperCamelCase__ : Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_a ) ).to(_a )
UpperCamelCase__ : int = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
_a )
if str(_a ).startswith("mps" ):
UpperCamelCase__ : Optional[int] = torch.manual_seed(_a )
else:
UpperCamelCase__ : Union[str, Any] = torch.Generator(device=_a ).manual_seed(_a )
UpperCamelCase__ : List[Any] = {
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""guidance_scale""": 4.0,
"""num_inference_steps""": 2,
"""output_type""": """np""",
}
return inputs
def __lowercase ( self : int ):
'''simple docstring'''
UpperCamelCase__ : Optional[Any] = """cpu"""
UpperCamelCase__ : Dict = self.get_dummy_components()
UpperCamelCase__ : List[str] = self.pipeline_class(**_a )
UpperCamelCase__ : List[Any] = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
UpperCamelCase__ : str = pipe(**self.get_dummy_inputs(_a ) )
UpperCamelCase__ : Dict = output.images
UpperCamelCase__ : Dict = pipe(
**self.get_dummy_inputs(_a ) , return_dict=_a , )[0]
UpperCamelCase__ : Dict = image[0, -3:, -3:, -1]
UpperCamelCase__ : str = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCamelCase__ : int = np.array(
[0.6_2_3_7_9_7_6, 1.0, 0.3_6_4_4_1_3_3_2, 1.0, 0.7_0_6_3_9_6_3_4, 0.2_9_8_7_7_1_8_6, 0.8_5_6_5_2_1_2_5, 0.5_2_1_6_8_4_3, 0.5_4_4_5_4_0_4_6] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class __a ( unittest.TestCase ):
def __lowercase ( self : List[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowercase ( self : str ):
'''simple docstring'''
UpperCamelCase__ : Tuple = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy" )
UpperCamelCase__ : Optional[Any] = KandinskyVaaPriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa )
pipe_prior.to(_a )
UpperCamelCase__ : Optional[Any] = KandinskyVaaPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-decoder" , torch_dtype=torch.floataa )
UpperCamelCase__ : Union[str, Any] = pipeline.to(_a )
pipeline.set_progress_bar_config(disable=_a )
UpperCamelCase__ : Tuple = """red cat, 4k photo"""
UpperCamelCase__ : str = torch.Generator(device="cuda" ).manual_seed(0 )
UpperCamelCase__ : Optional[Any] = pipe_prior(
_a , generator=_a , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
UpperCamelCase__ : Union[str, Any] = torch.Generator(device="cuda" ).manual_seed(0 )
UpperCamelCase__ : Tuple = pipeline(
image_embeds=_a , negative_image_embeds=_a , generator=_a , num_inference_steps=1_00 , output_type="np" , )
UpperCamelCase__ : Union[str, Any] = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert_mean_pixel_difference(_a , _a ) | 228 |
"""simple docstring"""
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class __a (UpperCamelCase_):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :torch.FloatTensor
_SCREAMING_SNAKE_CASE :Optional[torch.FloatTensor] = None
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase=0.999 , __lowerCAmelCase="cosine" , ) -> Union[str, Any]:
if alpha_transform_type == "cosine":
def alpha_bar_fn(__lowerCAmelCase ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(__lowerCAmelCase ):
return math.exp(t * -12.0 )
else:
        raise ValueError(F'''Unsupported alpha_transform_type: {alpha_transform_type}''' )
SCREAMING_SNAKE_CASE__ : List[Any] = []
for i in range(__lowerCAmelCase ):
SCREAMING_SNAKE_CASE__ : List[str] = i / num_diffusion_timesteps
SCREAMING_SNAKE_CASE__ : int = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(__lowerCAmelCase ) / alpha_bar_fn(__lowerCAmelCase ) , __lowerCAmelCase ) )
return torch.tensor(__lowerCAmelCase , dtype=torch.floataa )
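# The loop above builds beta_i = min(1 - alpha_bar((i+1)/T) / alpha_bar(i/T), max_beta),
# with alpha_bar the squared-cosine schedule of Nichol & Dhariwal. An
# illustrative sanity check of that construction (example values only):
_T = 10
_abar = lambda t: math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
_betas = [min(1 - _abar((i + 1) / _T) / _abar(i / _T), 0.999) for i in range(_T)]
assert all(0 < b <= 0.999 for b in _betas)  # betas stay in (0, max_beta]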
class __a (UpperCamelCase_ , UpperCamelCase_):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :List[Any] = 1
@register_to_config
def __init__( self , _a = 1_000 , _a = 0.0_001 , _a = 0.02 , _a = "linear" , _a = None , _a = True , _a = True , _a = 0 , _a = "epsilon" , _a = 1.0 , **_a , ) -> Dict:
"""simple docstring"""
if kwargs.get("""set_alpha_to_one""" , _a ) is not None:
SCREAMING_SNAKE_CASE__ : Tuple = (
"""The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead."""
)
deprecate("""set_alpha_to_one""" , """1.0.0""" , _a , standard_warn=_a )
SCREAMING_SNAKE_CASE__ : Tuple = kwargs["""set_alpha_to_one"""]
if trained_betas is not None:
SCREAMING_SNAKE_CASE__ : Dict = torch.tensor(_a , dtype=torch.floataa )
elif beta_schedule == "linear":
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.linspace(_a , _a , _a , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
SCREAMING_SNAKE_CASE__ : Optional[int] = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , _a , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
SCREAMING_SNAKE_CASE__ : Tuple = betas_for_alpha_bar(_a )
else:
            raise NotImplementedError(f'''{beta_schedule} is not implemented for {self.__class__}''' )
SCREAMING_SNAKE_CASE__ : Optional[int] = 1.0 - self.betas
SCREAMING_SNAKE_CASE__ : List[Any] = torch.cumprod(self.alphas , dim=0 )
# At every step in inverted ddim, we are looking into the next alphas_cumprod
# For the final step, there is no next alphas_cumprod, and the index is out of bounds
# `set_alpha_to_zero` decides whether we set this parameter simply to zero
        # in this case, self.step() just outputs the predicted noise
# or whether we use the final alpha of the "non-previous" one.
SCREAMING_SNAKE_CASE__ : Any = torch.tensor(0.0 ) if set_alpha_to_zero else self.alphas_cumprod[-1]
# standard deviation of the initial noise distribution
SCREAMING_SNAKE_CASE__ : Tuple = 1.0
# setable values
SCREAMING_SNAKE_CASE__ : Dict = None
SCREAMING_SNAKE_CASE__ : List[str] = torch.from_numpy(np.arange(0 , _a ).copy().astype(np.intaa ) )
def _a ( self , _a , _a = None ) -> torch.FloatTensor:
"""simple docstring"""
return sample
def _a ( self , _a , _a = None ) -> Optional[int]:
"""simple docstring"""
if num_inference_steps > self.config.num_train_timesteps:
raise ValueError(
f'''`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:'''
f''' {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle'''
f''' maximal {self.config.num_train_timesteps} timesteps.''' )
SCREAMING_SNAKE_CASE__ : List[str] = num_inference_steps
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.config.num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
        # casting to int to avoid issues when num_inference_steps is a power of 3
SCREAMING_SNAKE_CASE__ : str = (np.arange(0 , _a ) * step_ratio).round().copy().astype(np.intaa )
SCREAMING_SNAKE_CASE__ : Tuple = torch.from_numpy(_a ).to(_a )
self.timesteps += self.config.steps_offset
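        # Concrete example of the spacing above (illustrative): with
        # num_train_timesteps=1000 and num_inference_steps=4, step_ratio is
        # 250 and the schedule visits timesteps [0, 250, 500, 750] before
        # self.config.steps_offset is added.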
def _a ( self , _a , _a , _a , _a = 0.0 , _a = False , _a = None , _a = True , ) -> Union[DDIMSchedulerOutput, Tuple]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = timestep + self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
# change original implementation to exactly match noise levels for analogous forward process
SCREAMING_SNAKE_CASE__ : Optional[int] = self.alphas_cumprod[timestep]
SCREAMING_SNAKE_CASE__ : Optional[int] = (
self.alphas_cumprod[prev_timestep]
if prev_timestep < self.config.num_train_timesteps
else self.final_alpha_cumprod
)
SCREAMING_SNAKE_CASE__ : Any = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
if self.config.prediction_type == "epsilon":
SCREAMING_SNAKE_CASE__ : int = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
SCREAMING_SNAKE_CASE__ : List[Any] = model_output
elif self.config.prediction_type == "sample":
SCREAMING_SNAKE_CASE__ : Dict = model_output
SCREAMING_SNAKE_CASE__ : int = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
elif self.config.prediction_type == "v_prediction":
SCREAMING_SNAKE_CASE__ : Dict = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
SCREAMING_SNAKE_CASE__ : str = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
else:
raise ValueError(
f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or'''
""" `v_prediction`""" )
# 4. Clip or threshold "predicted x_0"
if self.config.clip_sample:
SCREAMING_SNAKE_CASE__ : Tuple = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
# 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
SCREAMING_SNAKE_CASE__ : Any = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon
# 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
SCREAMING_SNAKE_CASE__ : Dict = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if not return_dict:
return (prev_sample, pred_original_sample)
return DDIMSchedulerOutput(prev_sample=_a , pred_original_sample=_a )
def __len__( self ) -> Dict:
"""simple docstring"""
return self.config.num_train_timesteps
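# For reference, step() above applies Eq. (12) of the DDIM paper in the
# inversion direction: x_next = sqrt(abar_next) * x0_hat + sqrt(1 - abar_next) * eps_hat.
# A minimal numeric sketch of the epsilon-parametrized branch (illustrative
# scalars, not tied to any trained model):
_abar_t, _abar_next = 0.9, 0.8
_sample, _eps_hat = 1.0, 0.5  # current latent x_t and predicted noise
_x0_hat = (_sample - (1 - _abar_t) ** 0.5 * _eps_hat) / _abar_t ** 0.5
_x_next = _abar_next ** 0.5 * _x0_hat + (1 - _abar_next) ** 0.5 * _eps_hat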
| 680 | 0 |
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError("At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training")
# TF training parameters
lowerCamelCase : Dict = False
lowerCamelCase : Optional[int] = False
def _SCREAMING_SNAKE_CASE ( lowercase : str ):
'''simple docstring'''
return TrainCommand(__lowerCAmelCase )
class A( UpperCamelCase_ ):
'''simple docstring'''
@staticmethod
def a__ ( A_ : int ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = parser.add_parser('train' , help='CLI tool to train a model on a task.' )
train_parser.add_argument(
'--train_data' , type=_a , required=_a , help='path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.' , )
train_parser.add_argument(
'--column_label' , type=_a , default=0 , help='Column of the dataset csv file with example labels.' )
train_parser.add_argument(
'--column_text' , type=_a , default=1 , help='Column of the dataset csv file with example texts.' )
train_parser.add_argument(
'--column_id' , type=_a , default=2 , help='Column of the dataset csv file with example ids.' )
train_parser.add_argument(
'--skip_first_row' , action='store_true' , help='Skip the first row of the csv file (headers).' )
train_parser.add_argument('--validation_data' , type=_a , default='' , help='path to validation dataset.' )
train_parser.add_argument(
'--validation_split' , type=_a , default=0.1 , help='if validation dataset is not provided, fraction of train dataset to use as validation dataset.' , )
train_parser.add_argument('--output' , type=_a , default='./' , help='path to saved the trained model.' )
train_parser.add_argument(
'--task' , type=_a , default='text_classification' , help='Task to train the model on.' )
train_parser.add_argument(
'--model' , type=_a , default='bert-base-uncased' , help='Model\'s name or path to stored model.' )
train_parser.add_argument('--train_batch_size' , type=_a , default=32 , help='Batch size for training.' )
train_parser.add_argument('--valid_batch_size' , type=_a , default=64 , help='Batch size for validation.' )
train_parser.add_argument('--learning_rate' , type=_a , default=3E-5 , help='Learning rate.' )
train_parser.add_argument('--adam_epsilon' , type=_a , default=1E-08 , help='Epsilon for Adam optimizer.' )
train_parser.set_defaults(func=_a )
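    # Illustrative invocation of the subcommand registered above, using the
    # flags this parser defines (assuming the standard transformers-cli entry
    # point):
    #   transformers-cli train --train_data data.csv --model bert-base-uncased --output ./out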
def __init__( self : List[str] , A_ : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ = logging.get_logger('transformers-cli/training' )
lowerCamelCase_ = """tf""" if is_tf_available() else """torch"""
os.makedirs(args.output , exist_ok=_a )
lowerCamelCase_ = args.output
lowerCamelCase_ = args.column_label
lowerCamelCase_ = args.column_text
lowerCamelCase_ = args.column_id
self.logger.info(f"""Loading {args.task} pipeline for {args.model}""" )
if args.task == "text_classification":
lowerCamelCase_ = TextClassificationPipeline.from_pretrained(args.model )
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(f"""Loading dataset from {args.train_data}""" )
lowerCamelCase_ = Processor.create_from_csv(
args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
lowerCamelCase_ = None
if args.validation_data:
self.logger.info(f"""Loading validation dataset from {args.validation_data}""" )
lowerCamelCase_ = Processor.create_from_csv(
args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
lowerCamelCase_ = args.validation_split
lowerCamelCase_ = args.train_batch_size
lowerCamelCase_ = args.valid_batch_size
lowerCamelCase_ = args.learning_rate
lowerCamelCase_ = args.adam_epsilon
def a__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def a__ ( self : Optional[int] ) -> Dict:
"""simple docstring"""
raise NotImplementedError
def a__ ( self : Tuple ) -> Any:
"""simple docstring"""
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
| 70 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
a :Union[str, Any] = {
"configuration_speecht5": [
"SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP",
"SpeechT5Config",
"SpeechT5HifiGanConfig",
],
"feature_extraction_speecht5": ["SpeechT5FeatureExtractor"],
"processing_speecht5": ["SpeechT5Processor"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a :str = ["SpeechT5Tokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a :str = [
"SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST",
"SpeechT5ForSpeechToText",
"SpeechT5ForSpeechToSpeech",
"SpeechT5ForTextToSpeech",
"SpeechT5Model",
"SpeechT5PreTrainedModel",
"SpeechT5HifiGan",
]
if TYPE_CHECKING:
from .configuration_speechta import (
SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
SpeechTaConfig,
SpeechTaHifiGanConfig,
)
from .feature_extraction_speechta import SpeechTaFeatureExtractor
from .processing_speechta import SpeechTaProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speechta import SpeechTaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speechta import (
SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaHifiGan,
SpeechTaModel,
SpeechTaPreTrainedModel,
)
else:
import sys
a :Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
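# _LazyModule above defers the heavy torch/sentencepiece imports until an
# attribute is first accessed. A minimal sketch of the same idea via PEP 562
# module-level __getattr__ (illustrative mapping, not the transformers
# implementation):
import importlib

_lazy_imports = {"math": ["sqrt"]}  # hypothetical: providing module -> symbols

def __getattr__(name):
    for _module_name, _symbols in _lazy_imports.items():
        if name in _symbols:
            return getattr(importlib.import_module(_module_name), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")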
| 680 | 0 |
"""simple docstring"""
from math import sqrt
def lowerCamelCase ( _snake_case ):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 ,int(sqrt(__lowerCAmelCase ) + 1 ) ,6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
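# Why 6k +/- 1 suffices: any n can be written 6k + r with r in {0,...,5};
# r in {0, 2, 4} gives an even number and r == 3 a multiple of 3, so every
# prime above 3 leaves remainder 1 or 5. Illustrative cross-check against
# naive trial division (lowerCamelCase above is the obfuscated is_prime; the
# next definition shadows the name, so the check must run here):
def _naive_is_prime(n):
    return n > 1 and all(n % d for d in range(2, n))

assert all(lowerCamelCase(n) == _naive_is_prime(n) for n in range(2, 200))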
def lowerCamelCase ( _snake_case = 10001 ):
UpperCAmelCase__ : Optional[int] = 0
UpperCAmelCase__ : List[str] = 1
while count != nth and number < 3:
number += 1
if is_prime(__lowerCAmelCase ):
count += 1
while count != nth:
number += 2
if is_prime(__lowerCAmelCase ):
count += 1
return number
if __name__ == "__main__":
print(f'{solution() = }')
| 110 |
"""simple docstring"""
import math
import os
import sys
def _lowercase ( __lowerCAmelCase ) -> str:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = """"""
try:
with open(__lowerCAmelCase , """rb""" ) as binary_file:
SCREAMING_SNAKE_CASE__ : Optional[int] = binary_file.read()
for dat in data:
SCREAMING_SNAKE_CASE__ : Dict = F'''{dat:08b}'''
result += curr_byte
return result
except OSError:
print("""File not accessible""" )
sys.exit()
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> None:
lexicon.pop(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = last_match_id
if math.loga(__lowerCAmelCase ).is_integer():
for curr_key in lexicon:
SCREAMING_SNAKE_CASE__ : Dict = """0""" + lexicon[curr_key]
SCREAMING_SNAKE_CASE__ : str = bin(__lowerCAmelCase )[2:]
def _lowercase ( __lowerCAmelCase ) -> str:
SCREAMING_SNAKE_CASE__ : Dict = {"""0""": """0""", """1""": """1"""}
    SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = """""", """"""
SCREAMING_SNAKE_CASE__ : Any = len(__lowerCAmelCase )
for i in range(len(__lowerCAmelCase ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
SCREAMING_SNAKE_CASE__ : Optional[int] = lexicon[curr_string]
result += last_match_id
add_key_to_lexicon(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
index += 1
SCREAMING_SNAKE_CASE__ : List[str] = """"""
while curr_string != "" and curr_string not in lexicon:
curr_string += "0"
if curr_string != "":
SCREAMING_SNAKE_CASE__ : List[Any] = lexicon[curr_string]
result += last_match_id
return result
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase ) -> str:
SCREAMING_SNAKE_CASE__ : Any = os.path.getsize(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = bin(__lowerCAmelCase )[2:]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = len(__lowerCAmelCase )
return "0" * (length_length - 1) + file_length_binary + compressed
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase ) -> None:
SCREAMING_SNAKE_CASE__ : Optional[int] = 8
try:
with open(__lowerCAmelCase , """wb""" ) as opened_file:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [
to_write[i : i + byte_length]
for i in range(0 , len(__lowerCAmelCase ) , __lowerCAmelCase )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append("""10000000""" )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array:
opened_file.write(int(__lowerCAmelCase , 2 ).to_bytes(1 , byteorder="""big""" ) )
except OSError:
print("""File not accessible""" )
sys.exit()
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase ) -> None:
SCREAMING_SNAKE_CASE__ : Dict = read_file_binary(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[int] = compress_data(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[int] = add_file_length(__lowerCAmelCase , __lowerCAmelCase )
write_file_binary(__lowerCAmelCase , __lowerCAmelCase )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 680 | 0 |
"""simple docstring"""
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
"""simple docstring"""
a : torch.FloatTensor
a : Optional[torch.FloatTensor] =None
def a__ ( SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : str=0.999 , SCREAMING_SNAKE_CASE : Dict="cosine" , ):
'''simple docstring'''
if alpha_transform_type == "cosine":
def alpha_bar_fn(SCREAMING_SNAKE_CASE : Optional[int] ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(SCREAMING_SNAKE_CASE : Any ):
return math.exp(t * -12.0 )
else:
raise ValueError(f"""Unsupported alpha_tranform_type: {alpha_transform_type}""" )
lowerCAmelCase : List[Any] = []
for i in range(__lowerCAmelCase ):
lowerCAmelCase : List[str] = i / num_diffusion_timesteps
lowerCAmelCase : int = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(__lowerCAmelCase ) / alpha_bar_fn(__lowerCAmelCase ) , __lowerCAmelCase ) )
return torch.tensor(__lowerCAmelCase , dtype=torch.floataa )
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , UpperCamelCase_ ):
"""simple docstring"""
a : List[Any] =1
@register_to_config
def __init__( self , snake_case__ = 1_000 , snake_case__ = 0.0001 , snake_case__ = 0.02 , snake_case__ = "linear" , snake_case__ = None , snake_case__ = True , snake_case__ = True , snake_case__ = 0 , snake_case__ = "epsilon" , snake_case__ = 1.0 , **snake_case__ , ):
"""simple docstring"""
if kwargs.get("set_alpha_to_one" , _a ) is not None:
lowerCAmelCase : Tuple = (
"""The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead."""
)
deprecate("set_alpha_to_one" , "1.0.0" , _a , standard_warn=_a )
lowerCAmelCase : Tuple = kwargs["""set_alpha_to_one"""]
if trained_betas is not None:
lowerCAmelCase : Dict = torch.tensor(_a , dtype=torch.floataa )
elif beta_schedule == "linear":
lowerCAmelCase : Union[str, Any] = torch.linspace(_a , _a , _a , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
lowerCAmelCase : Optional[int] = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , _a , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
lowerCAmelCase : Tuple = betas_for_alpha_bar(_a )
else:
raise NotImplementedError(f"""{beta_schedule} does is not implemented for {self.__class__}""" )
lowerCAmelCase : Optional[int] = 1.0 - self.betas
lowerCAmelCase : List[Any] = torch.cumprod(self.alphas , dim=0 )
# At every step in inverted ddim, we are looking into the next alphas_cumprod
# For the final step, there is no next alphas_cumprod, and the index is out of bounds
# `set_alpha_to_zero` decides whether we set this parameter simply to zero
        # in this case, self.step() just outputs the predicted noise
# or whether we use the final alpha of the "non-previous" one.
lowerCAmelCase : Any = torch.tensor(0.0 ) if set_alpha_to_zero else self.alphas_cumprod[-1]
# standard deviation of the initial noise distribution
lowerCAmelCase : Tuple = 1.0
# setable values
lowerCAmelCase : Dict = None
lowerCAmelCase : List[str] = torch.from_numpy(np.arange(0 , _a ).copy().astype(np.intaa ) )
def lowercase__ ( self , snake_case__ , snake_case__ = None ):
"""simple docstring"""
return sample
def lowercase__ ( self , snake_case__ , snake_case__ = None ):
"""simple docstring"""
if num_inference_steps > self.config.num_train_timesteps:
raise ValueError(
f"""`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"""
f""" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"""
f""" maximal {self.config.num_train_timesteps} timesteps.""" )
lowerCAmelCase : List[str] = num_inference_steps
lowerCAmelCase : Optional[Any] = self.config.num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
        # casting to int to avoid issues when num_inference_steps is a power of 3
lowerCAmelCase : str = (np.arange(0 , _a ) * step_ratio).round().copy().astype(np.intaa )
lowerCAmelCase : Tuple = torch.from_numpy(_a ).to(_a )
self.timesteps += self.config.steps_offset
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ = 0.0 , snake_case__ = False , snake_case__ = None , snake_case__ = True , ):
"""simple docstring"""
lowerCAmelCase : int = timestep + self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
# change original implementation to exactly match noise levels for analogous forward process
lowerCAmelCase : Optional[int] = self.alphas_cumprod[timestep]
lowerCAmelCase : Optional[int] = (
self.alphas_cumprod[prev_timestep]
if prev_timestep < self.config.num_train_timesteps
else self.final_alpha_cumprod
)
lowerCAmelCase : Any = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
if self.config.prediction_type == "epsilon":
lowerCAmelCase : int = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
lowerCAmelCase : List[Any] = model_output
elif self.config.prediction_type == "sample":
lowerCAmelCase : Dict = model_output
lowerCAmelCase : int = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
elif self.config.prediction_type == "v_prediction":
lowerCAmelCase : Dict = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
lowerCAmelCase : str = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
else:
raise ValueError(
f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"""
" `v_prediction`" )
# 4. Clip or threshold "predicted x_0"
if self.config.clip_sample:
lowerCAmelCase : Tuple = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
# 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
lowerCAmelCase : Any = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon
# 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
lowerCAmelCase : Dict = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if not return_dict:
return (prev_sample, pred_original_sample)
return DDIMSchedulerOutput(prev_sample=_a , pred_original_sample=_a )
def __len__( self ):
"""simple docstring"""
return self.config.num_train_timesteps
| 645 |
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class __a (unittest.TestCase):
'''simple docstring'''
def _a ( self ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE__ : Tuple = SamImageProcessor()
SCREAMING_SNAKE_CASE__ : List[str] = SamProcessor(_a )
processor.save_pretrained(self.tmpdirname )
def _a ( self , **_a ) -> Union[str, Any]:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **_a ).image_processor
def _a ( self ) -> Tuple:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def _a ( self ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
SCREAMING_SNAKE_CASE__ : Tuple = [Image.fromarray(np.moveaxis(_a , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _a ( self ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ : Dict = self.get_image_processor(do_normalize=_a , padding_value=1.0 )
SCREAMING_SNAKE_CASE__ : Optional[int] = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=_a , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
def _a ( self ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = self.get_image_processor()
SCREAMING_SNAKE_CASE__ : Any = SamProcessor(image_processor=_a )
SCREAMING_SNAKE_CASE__ : List[str] = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ : Optional[Any] = image_processor(_a , return_tensors="""np""" )
SCREAMING_SNAKE_CASE__ : Dict = processor(images=_a , return_tensors="""np""" )
input_feat_extract.pop("""original_sizes""" ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop("""reshaped_input_sizes""" ) # pop original_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
@require_torch
def _a ( self ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = self.get_image_processor()
SCREAMING_SNAKE_CASE__ : Any = SamProcessor(image_processor=_a )
SCREAMING_SNAKE_CASE__ : Optional[int] = [torch.ones((1, 3, 5, 5) )]
SCREAMING_SNAKE_CASE__ : str = [[1_764, 2_646]]
SCREAMING_SNAKE_CASE__ : List[Any] = [[683, 1_024]]
SCREAMING_SNAKE_CASE__ : Any = processor.post_process_masks(_a , _a , _a )
self.assertEqual(masks[0].shape , (1, 3, 1_764, 2_646) )
SCREAMING_SNAKE_CASE__ : Dict = processor.post_process_masks(
_a , torch.tensor(_a ) , torch.tensor(_a ) )
self.assertEqual(masks[0].shape , (1, 3, 1_764, 2_646) )
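        # The low-res masks are upsampled and, per the SAM recipe, cropped via
        # reshaped_input_sizes to drop padding before being resized to
        # original_sizes, hence the (1, 3, 1764, 2646) output shape.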
# should also work with np
SCREAMING_SNAKE_CASE__ : Dict = [np.ones((1, 3, 5, 5) )]
SCREAMING_SNAKE_CASE__ : Tuple = processor.post_process_masks(_a , np.array(_a ) , np.array(_a ) )
self.assertEqual(masks[0].shape , (1, 3, 1_764, 2_646) )
SCREAMING_SNAKE_CASE__ : Dict = [[1, 0], [0, 1]]
with self.assertRaises(_a ):
SCREAMING_SNAKE_CASE__ : Tuple = processor.post_process_masks(_a , np.array(_a ) , np.array(_a ) )
@require_vision
@require_tf
class __a (unittest.TestCase):
'''simple docstring'''
def _a ( self ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE__ : Optional[int] = SamImageProcessor()
SCREAMING_SNAKE_CASE__ : Dict = SamProcessor(_a )
processor.save_pretrained(self.tmpdirname )
    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepares a list of random PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, SamImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        input_feat_extract.pop("original_sizes")  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("reshaped_input_sizes")  # pop reshaped_input_sizes as it is popped in the processor

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    @require_tf
    def test_post_process_masks(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        dummy_masks = [tf.ones((1, 3, 5, 5))]
        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]

        masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size, return_tensors="tf")
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        masks = processor.post_process_masks(
            dummy_masks, tf.convert_to_tensor(original_sizes), tf.convert_to_tensor(reshaped_input_size), return_tensors="tf",
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(
            dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors="tf")
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(tf.errors.InvalidArgumentError):
            processor.post_process_masks(
                dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors="tf")


@require_vision
@require_torchvision
class SamProcessorEquivalenceTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepares a list of random PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    @is_pt_tf_cross_test
    def test_post_process_masks_equivalence(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        dummy_masks = np.random.randint(0, 2, size=(1, 3, 5, 5)).astype(np.float32)
        tf_dummy_masks = [tf.convert_to_tensor(dummy_masks)]
        pt_dummy_masks = [torch.tensor(dummy_masks)]

        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]

        tf_masks = processor.post_process_masks(
            tf_dummy_masks, original_sizes, reshaped_input_size, return_tensors="tf")
        pt_masks = processor.post_process_masks(
            pt_dummy_masks, original_sizes, reshaped_input_size, return_tensors="pt")

        self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy()))

    @is_pt_tf_cross_test
    def test_image_processor_equivalence(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        pt_input_feat_extract = image_processor(image_input, return_tensors="pt")["pixel_values"].numpy()
        pt_input_processor = processor(images=image_input, return_tensors="pt")["pixel_values"].numpy()
        tf_input_feat_extract = image_processor(image_input, return_tensors="tf")["pixel_values"].numpy()
        tf_input_processor = processor(images=image_input, return_tensors="tf")["pixel_values"].numpy()

        self.assertTrue(np.allclose(pt_input_feat_extract, pt_input_processor))
        self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_feat_extract))
        self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_processor))
import math
import os
import sys


def read_file_binary(file_path: str) -> str:
    """Reads the given file as bytes and returns them as one long bit string."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def add_key_to_lexicon(lexicon: dict, curr_string: str, index: int, last_match_id: str) -> None:
    """Adds the new strings (curr_string + "0", curr_string + "1") to the lexicon."""
    lexicon.pop(curr_string)
    lexicon[curr_string + "0"] = last_match_id

    if math.log2(index).is_integer():
        for curr_key in lexicon:
            lexicon[curr_key] = "0" + lexicon[curr_key]

    lexicon[curr_string + "1"] = bin(index)[2:]


def compress_data(data_bits: str) -> str:
    """Compresses the given bit string with an LZ-style adaptive dictionary coder."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ""

    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"

    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id

    return result


def add_file_length(source_path: str, compressed: str) -> str:
    """Prepends the source file's length (Elias-gamma style) to the compressed bit string."""
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)

    return "0" * (length_length - 1) + file_length_binary + compressed


def write_file_binary(file_path: str, to_write: str) -> None:
    """Writes the given bit string as bytes to the file, padded with a stop marker."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def compress(source_path: str, destination_path: str) -> None:
    """Reads the source file, compresses it and writes the result to the destination file."""
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)


if __name__ == "__main__":
    compress(sys.argv[1], sys.argv[2])
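# A hand-checked sanity example for compress_data, not part of the original
# module. With the seed lexicon {"0": "0", "1": "1"}, the first bit "1" matches
# immediately and the lexicon doubles afterwards; tiny inputs can therefore
# expand, which is expected for dictionary coders:
#
#   >>> compress_data("10")
#   '100'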
"""simple docstring"""
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __a (UpperCamelCase_ , unittest.TestCase):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :List[Any] = LayoutLMTokenizer
_SCREAMING_SNAKE_CASE :Optional[int] = LayoutLMTokenizerFast
_SCREAMING_SNAKE_CASE :str = True
_SCREAMING_SNAKE_CASE :Optional[int] = True
def _a ( self ) -> Tuple:
"""simple docstring"""
super().setUp()
SCREAMING_SNAKE_CASE__ : List[str] = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
SCREAMING_SNAKE_CASE__ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def _a ( self , **_a ) -> Optional[int]:
"""simple docstring"""
return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **_a )
def _a ( self , _a ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = """UNwant\u00E9d,running"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = """unwanted, running"""
return input_text, output_text
def _a ( self ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = self.tokenizer_class(self.vocab_file )
SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(_a , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , [7, 4, 5, 10, 8, 9] )
def _a ( self ) -> Optional[int]:
"""simple docstring"""
pass
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool


class TextSummarizationTool(PipelineTool):
    """Summarization tool built on a pretrained seq2seq checkpoint."""

    default_checkpoint = "philschmid/bart-large-cnn-samsum"
    description = (
        "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
        "and returns a summary of the text."
    )
    name = "summarizer"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM

    inputs = ["text"]
    outputs = ["text"]

    def encode(self, text):
        return self.pre_processor(text, return_tensors="pt", truncation=True)

    def forward(self, inputs):
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
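# Usage sketch (illustrative; downloads the checkpoint named in
# `default_checkpoint` on first use, and the input text is made up):
#
#   tool = TextSummarizationTool()
#   summary = tool("Alice: Lunch at noon? Bob: Sure, the usual place. Alice: See you there.")
#   print(summary)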
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps)
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`")
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions, references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps", type=int, default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
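# Typical launches for this script (flag values illustrative):
#
#   accelerate launch gradient_accumulation.py --gradient_accumulation_steps 4
#   accelerate launch gradient_accumulation.py --mixed_precision fp16 --gradient_accumulation_steps 2
#
# Inside `accelerator.accumulate(model)`, gradients are only synchronized and
# applied every `gradient_accumulation_steps` batches, so the effective batch
# size is batch_size * gradient_accumulation_steps * num_processes.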
import gc
import threading
import time

import psutil
import torch


class PeakCPUMemory:
    """Tracks the peak resident-set size of the current process on a background thread."""

    def __init__(self):
        self.process = psutil.Process()
        self.peak_monitoring = False

    def peak_monitor(self):
        self.cpu_memory_peak = -1

        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss, self.cpu_memory_peak)

            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break

    def start(self):
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor)
        self.thread.daemon = True
        self.thread.start()

    def stop(self):
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak


cpu_peak_tracker = PeakCPUMemory()


def start_measure():
    # Time
    measures = {"time": time.time()}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures["cpu"] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = torch.cuda.memory_allocated(i)
    torch.cuda.reset_peak_memory_stats()

    return measures


def end_measure(start_measures):
    # Time
    measures = {"time": time.time() - start_measures["time"]}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem (in MiB)
    measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20

    # GPU mem (in MiB)
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20
        measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20

    return measures


def log_measures(measures, description):
    print(f"{description}:")
    print(f"- Time: {measures['time']:.2f}s")
    for i in range(torch.cuda.device_count()):
        print(f"- GPU {i} allocated: {measures[str(i)]:.2f}MiB")
        peak = measures[f"{i}-peak"]
        print(f"- GPU {i} peak: {peak:.2f}MiB")
    print(f"- CPU RAM allocated: {measures['cpu']:.2f}MiB")
    print(f"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB")
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_import_structure = {
"configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ernie"] = [
"ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST",
"ErnieForCausalLM",
"ErnieForMaskedLM",
"ErnieForMultipleChoice",
"ErnieForNextSentencePrediction",
"ErnieForPreTraining",
"ErnieForQuestionAnswering",
"ErnieForSequenceClassification",
"ErnieForTokenClassification",
"ErnieModel",
"ErniePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
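# Import-time behavior sketch: with the lazy module installed above, a
# user-facing import like the following resolves instantly, and the torch-backed
# symbols are only materialized on first attribute access (illustrative; needs
# torch installed for the model classes):
#
#   from transformers import ErnieConfig, ErnieModel
#   model = ErnieModel(ErnieConfig())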
from string import ascii_uppercase

char_to_index = {char: i for i, char in enumerate(ascii_uppercase)}
index_to_char = dict(enumerate(ascii_uppercase))


def generate_key(message: str, key: str) -> str:
    """Repeats the key until it is as long as the message."""
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key


def cipher_text(message: str, key_new: str) -> str:
    """Encrypts the message by shifting each letter back by the matching key letter."""
    cipher_text = ""
    i = 0
    for letter in message:
        if letter == " ":
            cipher_text += " "
        else:
            x = (char_to_index[letter] - char_to_index[key_new[i]]) % 26
            i += 1
            cipher_text += index_to_char[x]
    return cipher_text


def original_text(cipher_text: str, key_new: str) -> str:
    """Decrypts by adding the matching key letter back."""
    or_txt = ""
    i = 0
    for letter in cipher_text:
        if letter == " ":
            or_txt += " "
        else:
            x = (char_to_index[letter] + char_to_index[key_new[i]] + 26) % 26
            i += 1
            or_txt += index_to_char[x]
    return or_txt


def main() -> None:
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f"Encrypted Text = {s}")
    print(f"Original Text = {original_text(s, key_new)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
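# Hand-checked round trip (this is what main() prints; note the key index only
# advances on letters, so spaces are passed through untouched):
#
#   generate_key("THE GERMAN ATTACK", "SECRET")                 -> "SECRETSECRETSECRE"
#   cipher_text("THE GERMAN ATTACK", "SECRETSECRETSECRE")       -> "BDC PAYUWL JPAIYI"
#   original_text("BDC PAYUWL JPAIYI", "SECRETSECRETSECRE")     -> "THE GERMAN ATTACK"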
"""simple docstring"""
def _lowercase ( __lowerCAmelCase ) -> int:
assert (
isinstance(__lowerCAmelCase , __lowerCAmelCase ) and number_of_steps > 0
), F'''number_of_steps needs to be positive integer, your input {number_of_steps}'''
if number_of_steps == 1:
return 1
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = 1, 1
for _ in range(number_of_steps - 1 ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = current + previous, current
return current
if __name__ == "__main__":
import doctest
doctest.testmod()
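# Quick check: the three ways up 3 steps are 1+1+1, 1+2 and 2+1, and the
# recurrence is the Fibonacci sequence shifted by one:
#
#   >>> climb_stairs(3)
#   3
#   >>> climb_stairs(5)
#   8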
"""simple docstring"""
from __future__ import annotations
import pandas as pd
def a ( __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Dict , __UpperCAmelCase : Dict ) -> list[int]:
__magic_name__: int = [0] * no_of_processes
__magic_name__: Union[str, Any] = [0] * no_of_processes
# Copy the burst time into remaining_time[]
for i in range(__lowerCAmelCase ):
__magic_name__: str = burst_time[i]
__magic_name__: Any = 0
__magic_name__: int = 0
__magic_name__: Tuple = 9_9_9_9_9_9_9_9_9
__magic_name__: List[str] = 0
__magic_name__: Any = False
# Process until all processes are completed
while complete != no_of_processes:
for j in range(__lowerCAmelCase ):
if arrival_time[j] <= increment_time and remaining_time[j] > 0:
if remaining_time[j] < minm:
__magic_name__: Union[str, Any] = remaining_time[j]
__magic_name__: Optional[Any] = j
__magic_name__: Any = True
if not check:
increment_time += 1
continue
remaining_time[short] -= 1
__magic_name__: Any = remaining_time[short]
if minm == 0:
__magic_name__: int = 9_9_9_9_9_9_9_9_9
if remaining_time[short] == 0:
complete += 1
__magic_name__: Union[str, Any] = False
# Find finish time of current process
__magic_name__: int = increment_time + 1
# Calculate waiting time
__magic_name__: List[Any] = finish_time - arrival_time[short]
__magic_name__: Dict = finar - burst_time[short]
if waiting_time[short] < 0:
__magic_name__: Tuple = 0
# Increment time
increment_time += 1
return waiting_time
def a ( __UpperCAmelCase : Optional[int] , __UpperCAmelCase : str , __UpperCAmelCase : Any ) -> list[int]:
__magic_name__: str = [0] * no_of_processes
for i in range(__lowerCAmelCase ):
__magic_name__: Tuple = burst_time[i] + waiting_time[i]
return turn_around_time
def a ( __UpperCAmelCase : Dict , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[Any] ) -> None:
__magic_name__: Optional[Any] = 0
__magic_name__: str = 0
for i in range(__lowerCAmelCase ):
__magic_name__: Tuple = total_waiting_time + waiting_time[i]
__magic_name__: Union[str, Any] = total_turn_around_time + turn_around_time[i]
print(f'Average waiting time = {total_waiting_time / no_of_processes:.5f}' )
print("""Average turn around time =""" , total_turn_around_time / no_of_processes )
if __name__ == "__main__":
print('Enter how many process you want to analyze')
__lowerCamelCase = int(input())
__lowerCamelCase = [0] * no_of_processes
__lowerCamelCase = [0] * no_of_processes
__lowerCamelCase = list(range(1, no_of_processes + 1))
for i in range(no_of_processes):
print('Enter the arrival time and burst time for process:--' + str(i + 1))
__lowerCamelCase = map(int, input().split())
__lowerCamelCase = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
__lowerCamelCase = burst_time
__lowerCamelCase = no_of_processes
__lowerCamelCase = waiting_time
__lowerCamelCase = calculate_turnaroundtime(bt, n, wt)
calculate_average_times(waiting_time, turn_around_time, no_of_processes)
__lowerCamelCase = pd.DataFrame(
list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
columns=[
'Process',
'BurstTime',
'ArrivalTime',
'WaitingTime',
'TurnAroundTime',
],
)
# Printing the dataFrame
pd.set_option('display.max_rows', fcfs.shape[0] + 1)
print(fcfs)
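# Non-interactive sanity check, worked out by hand for this implementation:
# arrivals [0, 1, 2] with bursts [8, 4, 1] preempt twice (P2 preempts P1 at
# t=1, P3 preempts P2 at t=2), giving waiting times [5, 1, 0] and turn-around
# times [13, 5, 1]:
#
#   calculate_waitingtime([0, 1, 2], [8, 4, 1], 3)      -> [5, 1, 0]
#   calculate_turnaroundtime([8, 4, 1], 3, [5, 1, 0])   -> [13, 5, 1]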
"""simple docstring"""
from math import factorial
def _lowercase ( __lowerCAmelCase = 100 ) -> int:
return sum(int(__lowerCAmelCase ) for x in str(factorial(__lowerCAmelCase ) ) )
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
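# Worked example: 10! = 3628800, and 3+6+2+8+8+0+0 = 27, so solution(10) == 27.
# (For the default num = 100 this is Project Euler problem 20.)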
"""simple docstring"""
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
# General docstring
_SCREAMING_SNAKE_CASE = "ResNetConfig"
# Base docstring
_SCREAMING_SNAKE_CASE = "microsoft/resnet-50"
_SCREAMING_SNAKE_CASE = [1, 2_048, 7, 7]
# Image classification docstring
_SCREAMING_SNAKE_CASE = "microsoft/resnet-50"
_SCREAMING_SNAKE_CASE = "tiger cat"
_SCREAMING_SNAKE_CASE = [
"microsoft/resnet-50",
# See all resnet models at https://huggingface.co/models?filter=resnet
]
class __magic_name__ ( nn.Module ):
def __init__( self : Any , snake_case_ : Tuple , snake_case_ : Tuple , snake_case_ : Optional[int] = 3 , snake_case_ : List[str] = 1 , snake_case_ : Optional[int] = "relu" ):
super().__init__()
__snake_case = nn.Convad(
_a , _a , kernel_size=_a , stride=_a , padding=kernel_size // 2 , bias=_a )
__snake_case = nn.BatchNormad(_a )
__snake_case = ACTaFN[activation] if activation is not None else nn.Identity()
def lowerCAmelCase ( self : str , snake_case_ : List[Any] ):
__snake_case = self.convolution(_a )
__snake_case = self.normalization(_a )
__snake_case = self.activation(_a )
return hidden_state
class __magic_name__ ( nn.Module ):
def __init__( self : Tuple , snake_case_ : Dict ):
super().__init__()
__snake_case = ResNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act )
__snake_case = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 )
__snake_case = config.num_channels
def lowerCAmelCase ( self : Tuple , snake_case_ : Dict ):
__snake_case = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration." )
__snake_case = self.embedder(_a )
__snake_case = self.pooler(_a )
return embedding
class __magic_name__ ( nn.Module ):
def __init__( self : List[str] , snake_case_ : List[Any] , snake_case_ : str , snake_case_ : str = 2 ):
super().__init__()
__snake_case = nn.Convad(_a , _a , kernel_size=1 , stride=_a , bias=_a )
__snake_case = nn.BatchNormad(_a )
def lowerCAmelCase ( self : int , snake_case_ : int ):
__snake_case = self.convolution(_a )
__snake_case = self.normalization(_a )
return hidden_state
class __magic_name__ ( nn.Module ):
def __init__( self : List[Any] , snake_case_ : int , snake_case_ : Union[str, Any] , snake_case_ : int = 1 , snake_case_ : int = "relu" ):
super().__init__()
__snake_case = in_channels != out_channels or stride != 1
__snake_case = (
ResNetShortCut(_a , _a , stride=_a ) if should_apply_shortcut else nn.Identity()
)
__snake_case = nn.Sequential(
ResNetConvLayer(_a , _a , stride=_a ) , ResNetConvLayer(_a , _a , activation=_a ) , )
__snake_case = ACTaFN[activation]
def lowerCAmelCase ( self : Optional[int] , snake_case_ : Union[str, Any] ):
__snake_case = hidden_state
__snake_case = self.layer(_a )
__snake_case = self.shortcut(_a )
hidden_state += residual
__snake_case = self.activation(_a )
return hidden_state
class __magic_name__ ( nn.Module ):
def __init__( self : Optional[int] , snake_case_ : List[Any] , snake_case_ : List[str] , snake_case_ : Optional[int] = 1 , snake_case_ : Tuple = "relu" , snake_case_ : List[str] = 4 ):
super().__init__()
__snake_case = in_channels != out_channels or stride != 1
__snake_case = out_channels // reduction
__snake_case = (
ResNetShortCut(_a , _a , stride=_a ) if should_apply_shortcut else nn.Identity()
)
__snake_case = nn.Sequential(
ResNetConvLayer(_a , _a , kernel_size=1 ) , ResNetConvLayer(_a , _a , stride=_a ) , ResNetConvLayer(_a , _a , kernel_size=1 , activation=_a ) , )
__snake_case = ACTaFN[activation]
def lowerCAmelCase ( self : Tuple , snake_case_ : Tuple ):
__snake_case = hidden_state
__snake_case = self.layer(_a )
__snake_case = self.shortcut(_a )
hidden_state += residual
__snake_case = self.activation(_a )
return hidden_state
class __magic_name__ ( nn.Module ):
def __init__( self : Any , snake_case_ : Any , snake_case_ : int , snake_case_ : List[Any] , snake_case_ : Optional[int] = 2 , snake_case_ : str = 2 , ):
super().__init__()
__snake_case = ResNetBottleNeckLayer if config.layer_type == """bottleneck""" else ResNetBasicLayer
__snake_case = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(_a , _a , stride=_a , activation=config.hidden_act ) , *[layer(_a , _a , activation=config.hidden_act ) for _ in range(depth - 1 )] , )
def lowerCAmelCase ( self : str , snake_case_ : Dict ):
__snake_case = input
for layer in self.layers:
__snake_case = layer(_a )
return hidden_state
class __magic_name__ ( nn.Module ):
def __init__( self : List[str] , snake_case_ : Union[str, Any] ):
super().__init__()
__snake_case = nn.ModuleList([] )
# based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
self.stages.append(
ResNetStage(
_a , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
__snake_case = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(_a , config.depths[1:] ):
self.stages.append(ResNetStage(_a , _a , _a , depth=_a ) )
def lowerCAmelCase ( self : Tuple , snake_case_ : Tuple , snake_case_ : Dict = False , snake_case_ : List[str] = True ):
__snake_case = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
__snake_case = hidden_states + (hidden_state,)
__snake_case = stage_module(_a )
if output_hidden_states:
__snake_case = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(
last_hidden_state=_a , hidden_states=_a , )
class __magic_name__ ( UpperCamelCase_ ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = ResNetConfig
_SCREAMING_SNAKE_CASE : int = """resnet"""
_SCREAMING_SNAKE_CASE : Dict = """pixel_values"""
_SCREAMING_SNAKE_CASE : str = True
def lowerCAmelCase ( self : Dict , snake_case_ : Optional[int] ):
if isinstance(_a , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode="fan_out" , nonlinearity="relu" )
elif isinstance(_a , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def lowerCAmelCase ( self : Union[str, Any] , snake_case_ : Optional[int] , snake_case_ : Tuple=False ):
if isinstance(_a , _a ):
__snake_case = value
_SCREAMING_SNAKE_CASE = r"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
_SCREAMING_SNAKE_CASE = r"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
'The bare ResNet model outputting raw features without any specific head on top.' , UpperCamelCase_ , )
class __magic_name__ ( UpperCamelCase_ ):
def __init__( self : int , snake_case_ : Union[str, Any] ):
super().__init__(_a )
__snake_case = config
__snake_case = ResNetEmbeddings(_a )
__snake_case = ResNetEncoder(_a )
__snake_case = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_a )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=_a , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def lowerCAmelCase ( self : Optional[int] , snake_case_ : int , snake_case_ : Optional[Any] = None , snake_case_ : int = None ):
__snake_case = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__snake_case = return_dict if return_dict is not None else self.config.use_return_dict
__snake_case = self.embedder(_a )
__snake_case = self.encoder(
_a , output_hidden_states=_a , return_dict=_a )
__snake_case = encoder_outputs[0]
__snake_case = self.pooler(_a )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=_a , pooler_output=_a , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
'\n ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ' , UpperCamelCase_ , )
class __magic_name__ ( UpperCamelCase_ ):
def __init__( self : Optional[Any] , snake_case_ : List[str] ):
super().__init__(_a )
__snake_case = config.num_labels
__snake_case = ResNetModel(_a )
# classification head
__snake_case = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_a )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_a , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def lowerCAmelCase ( self : List[str] , snake_case_ : List[Any] = None , snake_case_ : Optional[Any] = None , snake_case_ : Optional[int] = None , snake_case_ : Optional[int] = None , ):
__snake_case = return_dict if return_dict is not None else self.config.use_return_dict
__snake_case = self.resnet(_a , output_hidden_states=_a , return_dict=_a )
__snake_case = outputs.pooler_output if return_dict else outputs[1]
__snake_case = self.classifier(_a )
__snake_case = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
__snake_case = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
__snake_case = """single_label_classification"""
else:
__snake_case = """multi_label_classification"""
if self.config.problem_type == "regression":
__snake_case = MSELoss()
if self.num_labels == 1:
__snake_case = loss_fct(logits.squeeze() , labels.squeeze() )
else:
__snake_case = loss_fct(_a , _a )
elif self.config.problem_type == "single_label_classification":
__snake_case = CrossEntropyLoss()
__snake_case = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
__snake_case = BCEWithLogitsLoss()
__snake_case = loss_fct(_a , _a )
if not return_dict:
__snake_case = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=_a , logits=_a , hidden_states=outputs.hidden_states )
@add_start_docstrings(
'\n ResNet backbone, to be used with frameworks like DETR and MaskFormer.\n ' , UpperCamelCase_ , )
class __magic_name__ ( UpperCamelCase_ , UpperCamelCase_ ):
def __init__( self : Optional[int] , snake_case_ : Optional[int] ):
super().__init__(_a )
super()._init_backbone(_a )
__snake_case = [config.embedding_size] + config.hidden_sizes
__snake_case = ResNetEmbeddings(_a )
__snake_case = ResNetEncoder(_a )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_a )
@replace_return_docstrings(output_type=_a , config_class=_CONFIG_FOR_DOC )
def lowerCAmelCase ( self : Tuple , snake_case_ : int , snake_case_ : Dict = None , snake_case_ : Optional[int] = None ):
__snake_case = return_dict if return_dict is not None else self.config.use_return_dict
__snake_case = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__snake_case = self.embedder(_a )
__snake_case = self.encoder(_a , output_hidden_states=_a , return_dict=_a )
__snake_case = outputs.hidden_states
__snake_case = ()
for idx, stage in enumerate(self.stage_names ):
if stage in self.out_features:
feature_maps += (hidden_states[idx],)
if not return_dict:
__snake_case = (feature_maps,)
if output_hidden_states:
output += (outputs.hidden_states,)
return output
return BackboneOutput(
feature_maps=_a , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=_a , )
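# Inference sketch (downloads the public "microsoft/resnet-50" weights named
# above; the image path is a placeholder):
#
#   from transformers import AutoImageProcessor, ResNetForImageClassification
#   from PIL import Image
#   import torch
#
#   processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
#   model = ResNetForImageClassification.from_pretrained("microsoft/resnet-50")
#   inputs = processor(images=Image.open("cat.jpg"), return_tensors="pt")
#   with torch.no_grad():
#       logits = model(**inputs).logits
#   print(model.config.id2label[logits.argmax(-1).item()])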
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class DummyIterableDataset(IterableDataset):
    def __init__(self, data):
        self.data = data

    def __iter__(self):
        for element in self.data:
            yield element


def create_accelerator(even_batches=True):
    accelerator = Accelerator(even_batches=even_batches)
    assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
    return accelerator


def create_dataloader(accelerator: Accelerator, dataset_size: int, batch_size: int, iterable: bool = False):
    """Create a simple DataLoader to use during the test cases."""
    if iterable:
        dataset = DummyIterableDataset(torch.as_tensor(range(dataset_size)))
    else:
        dataset = TensorDataset(torch.as_tensor(range(dataset_size)))

    dl = DataLoader(dataset, batch_size=batch_size)
    dl = accelerator.prepare(dl)
    return dl


def verify_dataloader_batch_sizes(
    accelerator, dataset_size, batch_size, process_0_expected_batch_sizes, process_1_expected_batch_sizes,
):
    """A helper function for verifying the batch sizes coming from a prepared dataloader in each process."""
    dl = create_dataloader(accelerator=accelerator, dataset_size=dataset_size, batch_size=batch_size)

    batch_sizes = [len(batch[0]) for batch in dl]

    if accelerator.process_index == 0:
        assert batch_sizes == process_0_expected_batch_sizes
    elif accelerator.process_index == 1:
        assert batch_sizes == process_1_expected_batch_sizes


def test_default_ensures_even_batch_sizes():
    accelerator = create_accelerator()

    # without padding, we would expect a different number of batches
    verify_dataloader_batch_sizes(
        accelerator, dataset_size=3, batch_size=1, process_0_expected_batch_sizes=[1, 1], process_1_expected_batch_sizes=[1, 1],
    )

    # without padding, we would expect the same number of batches, but different sizes
    verify_dataloader_batch_sizes(
        accelerator, dataset_size=7, batch_size=2, process_0_expected_batch_sizes=[2, 2], process_1_expected_batch_sizes=[2, 2],
    )


def test_can_disable_even_batches():
    accelerator = create_accelerator(even_batches=False)

    verify_dataloader_batch_sizes(
        accelerator, dataset_size=3, batch_size=1, process_0_expected_batch_sizes=[1, 1], process_1_expected_batch_sizes=[1],
    )

    verify_dataloader_batch_sizes(
        accelerator, dataset_size=7, batch_size=2, process_0_expected_batch_sizes=[2, 2], process_1_expected_batch_sizes=[2, 1],
    )


def test_can_join_uneven_inputs():
    accelerator = create_accelerator(even_batches=False)

    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)

    dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    batch_idxs = []
    with accelerator.join_uneven_inputs([ddp_model]):
        for batch_idx, batch in enumerate(dl):
            output = ddp_model(batch[0].float())
            loss = output.sum()
            loss.backward()
            batch_idxs.append(batch_idx)

    accelerator.wait_for_everyone()

    if accelerator.process_index == 0:
        assert batch_idxs == [0, 1]
    elif accelerator.process_index == 1:
        assert batch_idxs == [0]


def test_join_raises_warning_for_non_ddp_distributed(accelerator):
    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([Mock()]):
            pass

        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for multi-GPU" in str(w[-1].message)


def test_join_can_override_even_batches():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    train_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    valid_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
        train_dl_overridden_value = train_dl.batch_sampler.even_batches
        valid_dl_overridden_value = valid_dl.batch_sampler.even_batches

    assert train_dl_overridden_value == overridden_even_batches
    assert valid_dl_overridden_value == overridden_even_batches
    assert train_dl.batch_sampler.even_batches == default_even_batches
    assert valid_dl.batch_sampler.even_batches == default_even_batches


def test_join_can_override_for_mixed_type_dataloaders():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)
    batch_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    with warnings.catch_warnings():
        warnings.filterwarnings("ignore")
        try:
            with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
                batch_dl_overridden_value = batch_dl.batch_sampler.even_batches
        except AttributeError:
            # ensure attribute error is not raised when processing iterable dl
            raise AssertionError

    assert batch_dl_overridden_value == overridden_even_batches
    assert batch_dl.batch_sampler.even_batches == default_even_batches


def test_join_raises_warning_for_iterable_when_overriding_even_batches():
    accelerator = create_accelerator()
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)

    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([ddp_model], even_batches=False):
            pass

        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for map-style datasets" in str(w[-1].message)


def main():
    accelerator = create_accelerator()

    accelerator.print("Test that even_batches variable ensures uniform batches across processes")
    test_default_ensures_even_batch_sizes()

    accelerator.print("Run tests with even_batches disabled")
    test_can_disable_even_batches()

    accelerator.print("Test joining uneven inputs")
    test_can_join_uneven_inputs()

    accelerator.print("Test overriding even_batches when joining uneven inputs")
    test_join_can_override_even_batches()

    accelerator.print("Test overriding even_batches for mixed dataloader types")
    test_join_can_override_for_mixed_type_dataloaders()

    accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders")
    test_join_raises_warning_for_iterable_when_overriding_even_batches()

    accelerator.print("Test join with non DDP distributed raises warning")
    original_state = accelerator.state.distributed_type
    accelerator.state.distributed_type = DistributedType.FSDP
    test_join_raises_warning_for_non_ddp_distributed(accelerator)
    accelerator.state.distributed_type = original_state
if __name__ == "__main__":
main()
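# This script asserts num_processes == 2, so it is meant to be launched with a
# distributed launcher, e.g. (script name is a placeholder):
#
#   accelerate launch --num_processes 2 test_even_batches.py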
from PIL import Image


def change_contrast(img: Image, level: int) -> Image:
    """Changes the contrast of a PIL image by the given level."""
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        return int(128 + factor * (c - 128))

    return img.point(contrast)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change contrast to 170
        cont_img = change_contrast(img, 170)
        cont_img.save("image_data/lena_high_contrast.png", format="png")
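# The factor is the standard contrast-adjustment curve: for level = 170 it is
# 259 * (170 + 255) / (255 * (259 - 170)) = 110075 / 22695 ≈ 4.85, so pixel
# values are pushed away from the midpoint 128, increasing contrast.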
"""simple docstring"""
def _lowercase ( __lowerCAmelCase = 200_0000 ) -> int:
SCREAMING_SNAKE_CASE__ : int = [0 for i in range(n + 1 )]
SCREAMING_SNAKE_CASE__ : str = 1
SCREAMING_SNAKE_CASE__ : str = 1
for i in range(2 , int(n**0.5 ) + 1 ):
if primality_list[i] == 0:
for j in range(i * i , n + 1 , __lowerCAmelCase ):
SCREAMING_SNAKE_CASE__ : Any = 1
SCREAMING_SNAKE_CASE__ : Optional[Any] = 0
for i in range(__lowerCAmelCase ):
if primality_list[i] == 0:
sum_of_primes += i
return sum_of_primes
if __name__ == "__main__":
print(f'{solution() = }')
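# Quick check: the primes below 10 are 2, 3, 5 and 7, so solution(10) == 17.
# (For the default n = 2_000_000 this is Project Euler problem 10.)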
from collections import Counter
from timeit import timeit


def can_string_be_rearranged_as_palindrome_counter(input_str: str = "") -> bool:
    """A string can be rearranged into a palindrome iff at most one character has an odd count."""
    return sum(c % 2 for c in Counter(input_str.replace(" ", "").lower()).values()) < 2


def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool:
    if len(input_str) == 0:
        return True
    lower_case_input_str = input_str.replace(" ", "").lower()
    # character_freq_dict: Stores the frequency of every character in the input string
    character_freq_dict: dict[str, int] = {}

    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character, 0) + 1
    odd_char = 0

    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
    if odd_char > 1:
        return False
    return True


def benchmark(check_str: str = "") -> None:
    print("\nFor string = ", check_str, ":")
    print(
        "> can_string_be_rearranged_as_palindrome_counter()",
        "\tans =",
        can_string_be_rearranged_as_palindrome_counter(check_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome_counter(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )
    print(
        "> can_string_be_rearranged_as_palindrome()",
        "\tans =",
        can_string_be_rearranged_as_palindrome(check_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )


if __name__ == "__main__":
    check_str = input(
        "Enter string to determine if it can be rearranged as a palindrome or not: "
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(f"{check_str} can {'' if status else 'not '}be rearranged as a palindrome")
"""simple docstring"""
import numpy as np
import qiskit
def _lowercase ( __lowerCAmelCase = 8 , __lowerCAmelCase = None ) -> str:
SCREAMING_SNAKE_CASE__ : List[Any] = np.random.default_rng(seed=__lowerCAmelCase )
# Roughly 25% of the qubits will contribute to the key.
# So we take more than we need.
SCREAMING_SNAKE_CASE__ : List[str] = 6 * key_len
# Measurement basis for Alice's qubits.
SCREAMING_SNAKE_CASE__ : List[Any] = rng.integers(2 , size=__lowerCAmelCase )
# The set of states Alice will prepare.
SCREAMING_SNAKE_CASE__ : Optional[Any] = rng.integers(2 , size=__lowerCAmelCase )
# Measurement basis for Bob's qubits.
SCREAMING_SNAKE_CASE__ : str = rng.integers(2 , size=__lowerCAmelCase )
# Quantum Circuit to simulate BB84
SCREAMING_SNAKE_CASE__ : Union[str, Any] = qiskit.QuantumCircuit(__lowerCAmelCase , name="""BB84""" )
# Alice prepares her qubits according to rules above.
for index, _ in enumerate(__lowerCAmelCase ):
if alice_state[index] == 1:
bbaa_circ.x(__lowerCAmelCase )
if alice_basis[index] == 1:
bbaa_circ.h(__lowerCAmelCase )
bbaa_circ.barrier()
# Bob measures the received qubits according to rules above.
for index, _ in enumerate(__lowerCAmelCase ):
if bob_basis[index] == 1:
bbaa_circ.h(__lowerCAmelCase )
bbaa_circ.barrier()
bbaa_circ.measure_all()
# Simulate the quantum circuit.
SCREAMING_SNAKE_CASE__ : str = qiskit.Aer.get_backend("""aer_simulator""" )
# We only need to run one shot because the key is unique.
# Multiple shots will produce the same key.
SCREAMING_SNAKE_CASE__ : Optional[int] = qiskit.execute(__lowerCAmelCase , __lowerCAmelCase , shots=1 , seed_simulator=__lowerCAmelCase )
# Returns the result of measurement.
SCREAMING_SNAKE_CASE__ : int = job.result().get_counts(__lowerCAmelCase ).most_frequent()
# Extracting the generated key from the simulation results.
# Only keep measurement results where Alice and Bob chose the same basis.
SCREAMING_SNAKE_CASE__ : Optional[Any] = """""".join(
[
result_bit
for alice_basis_bit, bob_basis_bit, result_bit in zip(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
if alice_basis_bit == bob_basis_bit
] )
# Get final key. Pad with 0 if too short, otherwise truncate.
SCREAMING_SNAKE_CASE__ : Optional[int] = gen_key[:key_len] if len(__lowerCAmelCase ) >= key_len else gen_key.ljust(__lowerCAmelCase , """0""" )
return key
if __name__ == "__main__":
print(f'The generated key is : {bbaa(8, seed=0)}')
from doctest import testmod
testmod()
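# Shape-level sanity check (the key bits themselves depend on the simulator, so
# only the length and alphabet are asserted here):
#
#   key = bb84(16, seed=42)
#   assert len(key) == 16 and set(key) <= {"0", "1"}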
import argparse
import os
import re

import torch
from flax.traverse_util import flatten_dict
from t5x import checkpoints

from transformers import (
    AutoTokenizer,
    Pix2StructConfig,
    Pix2StructForConditionalGeneration,
    Pix2StructImageProcessor,
    Pix2StructProcessor,
    Pix2StructTextConfig,
    Pix2StructVisionConfig,
)


def get_flax_param(t5x_checkpoint_path):
    flax_params = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    flax_params = flatten_dict(flax_params)
    return flax_params


def rename_and_convert_flax_params(flax_dict):
    converted_dict = {}

    CONVERSION_MAPPING = {
        "token_embedder": "embeddings",
        "encoder_norm": "layernorm",
        "kernel": "weight",
        ".out": ".output",
        "scale": "weight",
        "embedders_0.pos_embedding": "row_embedder.weight",
        "embedders_1.pos_embedding": "column_embedder.weight",
    }

    DECODER_CONVERSION_MAPPING = {
        "query": "attention.query",
        "key": "attention.key",
        "value": "attention.value",
        "output.dense": "output",
        "encoder_decoder_attention.o": "encoder_decoder_attention.attention.o",
        "pre_self_attention_layer_norm": "self_attention.layer_norm",
        "pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm",
        "mlp.": "mlp.DenseReluDense.",
        "pre_mlp_layer_norm": "mlp.layer_norm",
        "self_attention.o": "self_attention.attention.o",
        "decoder.embeddings.embedding": "decoder.embed_tokens.weight",
        "decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight",
        "decoder.decoder_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.logits_dense.weight": "decoder.lm_head.weight",
    }

    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            new_key = ".".join(key[1:])

            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old, new)

            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old, new)

            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)
                new_key = new_key.replace("encoder", "encoder.encoder")

            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)

            converted_dict[new_key] = flax_dict[key]

    converted_torch_dict = {}
    # convert converted_dict into torch format
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T)
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key])

    return converted_torch_dict


def convert_pix2struct_original_pytorch_checkpoint_to_hf(
    t5x_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False
):
    flax_params = get_flax_param(t5x_checkpoint_path)

    if not use_large:
        encoder_config = Pix2StructVisionConfig()
        decoder_config = Pix2StructTextConfig()
    else:
        encoder_config = Pix2StructVisionConfig(
            hidden_size=1536, d_ff=3968, num_attention_heads=24, num_hidden_layers=18)
        decoder_config = Pix2StructTextConfig(hidden_size=1536, d_ff=3968, num_heads=24, num_layers=18)
    config = Pix2StructConfig(
        vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa)

    model = Pix2StructForConditionalGeneration(config)

    torch_params = rename_and_convert_flax_params(flax_params)
    model.load_state_dict(torch_params)

    tokenizer = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer")
    image_processor = Pix2StructImageProcessor()
    processor = Pix2StructProcessor(image_processor=image_processor, tokenizer=tokenizer)

    if use_large:
        processor.image_processor.max_patches = 4096
        processor.image_processor.is_vqa = True

    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)

    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)

    print("Model saved in {}".format(pytorch_dump_folder_path))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--t5x_checkpoint_path", default=None, type=str, help="Path to the original T5x checkpoint.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--use_large", action="store_true", help="Use large model.")
    parser.add_argument("--is_vqa", action="store_true", help="Convert a VQA checkpoint.")
    args = parser.parse_args()

    convert_pix2struct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
    )
| 70 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInpaintPipelineFastTests(PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase):
'''simple docstring'''
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        [])  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components( self ) -> Dict:
"""simple docstring"""
torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=True , )
        scheduler = PNDMScheduler(skip_prk_steps=True )
torch.manual_seed(0 )
        vae = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="""gelu""" , projection_dim=512 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        components = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
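    # Note (added): the in_channels=9 on the UNet above is inpainting-specific:
    # 4 latent channels for the noisy image, 4 for the masked-image latents, and
    # 1 for the downsampled mask.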
    def get_dummy_inputs( self , device , seed=0 ) -> List[str]:
        """simple docstring"""
        image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        init_image = Image.fromarray(np.uint8(image ) ).convert("""RGB""" ).resize((64, 64) )
        mask_image = Image.fromarray(np.uint8(image + 4 ) ).convert("""RGB""" ).resize((64, 64) )
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": init_image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
    def test_stable_diffusion_inpaint( self ) -> List[str]:
        """simple docstring"""
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = sd_pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4_727, 0.5_735, 0.3_941, 0.5_446, 0.5_926, 0.4_394, 0.5_062, 0.4_654, 0.4_476] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def test_inference_batch_single_identical( self ) -> Optional[int]:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class __a (unittest.TestCase):
'''simple docstring'''
    def tearDown( self ) -> int:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_stable_diffusion_inpaint_pipeline( self ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
SCREAMING_SNAKE_CASE__ : Tuple = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
SCREAMING_SNAKE_CASE__ : Any = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
"""/yellow_cat_sitting_on_a_park_bench.npy""" )
SCREAMING_SNAKE_CASE__ : Optional[int] = """stabilityai/stable-diffusion-2-inpainting"""
SCREAMING_SNAKE_CASE__ : Any = StableDiffusionInpaintPipeline.from_pretrained(_a , safety_checker=_a )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE__ : int = """Face of a yellow cat, high resolution, sitting on a park bench"""
SCREAMING_SNAKE_CASE__ : List[str] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Tuple = pipe(
prompt=_a , image=_a , mask_image=_a , generator=_a , output_type="""np""" , )
SCREAMING_SNAKE_CASE__ : Optional[Any] = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 9E-3
    def test_stable_diffusion_inpaint_pipeline_fp16( self ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
SCREAMING_SNAKE_CASE__ : int = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
"""/yellow_cat_sitting_on_a_park_bench_fp16.npy""" )
SCREAMING_SNAKE_CASE__ : List[str] = """stabilityai/stable-diffusion-2-inpainting"""
SCREAMING_SNAKE_CASE__ : List[Any] = StableDiffusionInpaintPipeline.from_pretrained(
_a , torch_dtype=torch.floataa , safety_checker=_a , )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE__ : Any = """Face of a yellow cat, high resolution, sitting on a park bench"""
SCREAMING_SNAKE_CASE__ : Any = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pipe(
prompt=_a , image=_a , mask_image=_a , generator=_a , output_type="""np""" , )
SCREAMING_SNAKE_CASE__ : Tuple = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5E-1
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading( self ) -> Tuple:
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
SCREAMING_SNAKE_CASE__ : Dict = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
SCREAMING_SNAKE_CASE__ : str = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
SCREAMING_SNAKE_CASE__ : List[str] = """stabilityai/stable-diffusion-2-inpainting"""
SCREAMING_SNAKE_CASE__ : Dict = PNDMScheduler.from_pretrained(_a , subfolder="""scheduler""" )
SCREAMING_SNAKE_CASE__ : Optional[int] = StableDiffusionInpaintPipeline.from_pretrained(
_a , safety_checker=_a , scheduler=_a , torch_dtype=torch.floataa , )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = """Face of a yellow cat, high resolution, sitting on a park bench"""
SCREAMING_SNAKE_CASE__ : Any = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pipe(
prompt=_a , image=_a , mask_image=_a , generator=_a , num_inference_steps=2 , output_type="""np""" , )
SCREAMING_SNAKE_CASE__ : List[str] = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 10**9
| 680 | 0 |
"""simple docstring"""
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[float],
    iterations: int,
) -> list[float]:
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape
    if rows1 != cols1:
        msg = F'''Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}'''
        raise ValueError(msg )
    if cols2 != 1:
        msg = F'''Constant matrix must be nx1 but received {rows2}x{cols2}'''
        raise ValueError(msg )
    if rows1 != rows2:
        msg = (
            """Coefficient and constant matrices dimensions must be nxn and nx1 but """
            F'''received {rows1}x{cols1} and {rows2}x{cols2}'''
        )
        raise ValueError(msg )
    if len(init_val ) != rows1:
        msg = (
            """Number of initial values must be equal to number of rows in coefficient """
            F'''matrix but received {len(init_val )} and {rows1}'''
        )
        raise ValueError(msg )
    if iterations <= 0:
        raise ValueError('Iterations must be at least 1' )
    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix) ,axis=1 )
    rows, cols = table.shape
    strictly_diagonally_dominant(table )
    # Iterates the whole matrix for given number of times
    for _ in range(iterations ):
        new_val = []
        for row in range(rows ):
            temp = 0
            for col in range(cols ):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp )
        init_val = new_val
    return [float(i ) for i in new_val]
def strictly_diagonally_dominant( table: NDArray[float64] ) -> bool:
    rows, cols = table.shape
    is_diagonally_dominant = True
    for i in range(0 , rows ):
        total = 0
        for j in range(0 , cols - 1 ):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError('Coefficient matrix is not strictly diagonally dominant' )
    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
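    # Demo (added; the system below is illustrative, not from the original file):
    # a strictly diagonally dominant 3x3 system on which Jacobi iteration converges.
    coefficient = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
    constant = np.array([[2.0], [-6.0], [-4.0]])
    print(jacobi_iteration_method(coefficient, constant, [0.5, -0.5, -0.5], iterations=25))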
| 110 |
"""simple docstring"""
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)
def main():
    parser = argparse.ArgumentParser(
        description="""Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).""" )
    parser.add_argument("""--file_path""" , type=str , default="""data/dump.txt""" , help="""The path to the data.""" )
    parser.add_argument("""--tokenizer_type""" , type=str , default="""bert""" , choices=["""bert""", """roberta""", """gpt2"""] )
    parser.add_argument("""--tokenizer_name""" , type=str , default="""bert-base-uncased""" , help="""The tokenizer to use.""" )
    parser.add_argument("""--dump_file""" , type=str , default="""data/dump""" , help="""The dump file prefix.""" )
    args = parser.parse_args()
    logger.info(F'''Loading Tokenizer ({args.tokenizer_name})''' )
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map["""cls_token"""]  # `[CLS]`
        sep = tokenizer.special_tokens_map["""sep_token"""]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map["""cls_token"""]  # `<s>`
        sep = tokenizer.special_tokens_map["""sep_token"""]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map["""bos_token"""]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["""eos_token"""]  # `<|endoftext|>`
    logger.info(F'''Loading text from {args.file_path}''' )
    with open(args.file_path , """r""" , encoding="""utf8""" ) as fp:
        data = fp.readlines()
    logger.info("""Start encoding""" )
    logger.info(F'''{len(data )} examples to process.''' )
    rslt = []
    iter = 0
    interval = 1_0000
    start = time.time()
    for text in data:
        text = F'''{bos} {text.strip()} {sep}'''
        token_ids = tokenizer.encode(text , add_special_tokens=False )
        rslt.append(token_ids )
        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(F'''{iter} examples processed. - {(end-start):.2f}s/{interval}expl''' )
            start = time.time()
    logger.info("""Finished binarization""" )
    logger.info(F'''{len(data )} examples processed.''' )
    dp_file = F'''{args.dump_file}.{args.tokenizer_name}.pickle'''
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d ) for d in rslt]
    else:
        rslt_ = [np.int32(d ) for d in rslt]
    random.shuffle(rslt_ )
    logger.info(F'''Dump to {dp_file}''' )
    with open(dp_file , """wb""" ) as handle:
        pickle.dump(rslt_ , handle , protocol=pickle.HIGHEST_PROTOCOL )
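# Note (added): ids are stored as uint16 whenever the vocabulary fits in 16 bits
# (true for BERT's ~30k vocab), halving the size of the pickled dataset;
# otherwise int32 is used.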
if __name__ == "__main__":
main()
| 680 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/swinv2-tiny-patch4-window8-256": (
"https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
),
}
class Swinv2Config( PretrainedConfig ):
    """simple docstring"""
    model_type = """swinv2"""
    attribute_map = {
        """num_attention_heads""": """num_heads""",
        """num_hidden_layers""": """num_layers""",
    }
    def __init__( self , image_size=224 , patch_size=4 , num_channels=3 , embed_dim=96 , depths=[2, 2, 6, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , mlp_ratio=4.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , initializer_range=0.02 , layer_norm_eps=1e-5 , encoder_stride=32 , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
        self.pretrained_window_sizes = (0, 0, 0, 0)
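        # Note (added): with the defaults (embed_dim=96, four stages) this gives
        # hidden_size = 96 * 2**3 = 768, the channel count after the last stage.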
| 645 |
"""simple docstring"""
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2
a :List[Any] = ""
a :Union[str, Any] = ""
a :List[str] = ""
a :str = 1 # (0 is vertical, 1 is horizontal)
def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR , IMG_DIR )
    print("""Processing...""" )
    new_images, new_annos, paths = update_image_and_anno(img_paths , annos , FLIP_TYPE )
    for index, image in enumerate(new_images ):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32 )
        file_name = paths[index].split(os.sep )[-1].rsplit(""".""" , 1 )[0]
        file_root = F'''{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}'''
        cv2.imwrite(F'''/{file_root}.jpg''' , image , [cv2.IMWRITE_JPEG_QUALITY, 85] )
        print(F'''Success {index+1}/{len(new_images )} with {file_name}''' )
        annos_list = []
        for anno in new_annos[index]:
            obj = F'''{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}'''
            annos_list.append(obj )
        with open(F'''/{file_root}.txt''' , """w""" ) as outfile:
            outfile.write("""\n""".join(line for line in annos_list ) )
def get_dataset( label_dir , img_dir ) -> tuple[list, list]:
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir , """*.txt""" ) ):
        label_name = label_file.split(os.sep )[-1].rsplit(""".""" , 1 )[0]
        with open(label_file ) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir , F'''{label_name}.jpg''' )
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("""\n""" ).split(""" """ )
            boxes.append(
                [
                    int(obj[0] ),
                    float(obj[1] ),
                    float(obj[2] ),
                    float(obj[3] ),
                    float(obj[4] ),
                ] )
        if not boxes:
            continue
        img_paths.append(img_path )
        labels.append(boxes )
    return img_paths, labels
def update_image_and_anno( img_list , anno_list , flip_type = 1 ) -> tuple[list, list, list]:
    new_imgs_list = []
    new_annos_lists = []
    path_list = []
    for idx in range(len(img_list ) ):
        new_annos = []
        path = img_list[idx]
        path_list.append(path )
        img_annos = anno_list[idx]
        img = cv2.imread(path )
        if flip_type == 1:
            new_img = cv2.flip(img , flip_type )
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
        elif flip_type == 0:
            new_img = cv2.flip(img , flip_type )
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
        new_annos_lists.append(new_annos )
        new_imgs_list.append(new_img )
    return new_imgs_list, new_annos_lists, path_list
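# Note (added): annotations are YOLO-format rows (class_id, x_center, y_center,
# width, height, all normalized to [0, 1]), which is why a horizontal flip only
# remaps x_center to 1 - x_center and a vertical flip only remaps y_center.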
def random_chars( number_char = 32 ) -> str:
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code ) for _ in range(number_char ) )
if __name__ == "__main__":
main()
print("DONE ✅")
| 680 | 0 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
logger = logging.get_logger(__name__)
DPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"Intel/dpt-large": "https://huggingface.co/Intel/dpt-large/resolve/main/config.json",
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class DPTConfig( PretrainedConfig ):
    model_type = """dpt"""
    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.0_2 , layer_norm_eps=1e-12 , image_size=384 , patch_size=16 , num_channels=3 , is_hybrid=False , qkv_bias=True , backbone_out_indices=[2, 5, 8, 11] , readout_type="project" , reassemble_factors=[4, 2, 1, 0.5] , neck_hidden_sizes=[96, 192, 384, 768] , fusion_hidden_size=256 , head_in_index=-1 , use_batch_norm_in_fusion_residual=False , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , semantic_loss_ignore_index=255 , semantic_classifier_dropout=0.1 , backbone_featmap_shape=[1, 1_024, 24, 24] , neck_ignore_stages=[0, 1] , backbone_config=None , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.is_hybrid = is_hybrid
        if self.is_hybrid:
            if backbone_config is None:
                logger.info('Initializing the config with a `BiT` backbone.' )
                backbone_config = {
                    """global_padding""": """same""",
                    """layer_type""": """bottleneck""",
                    """depths""": [3, 4, 9],
                    """out_features""": ["""stage1""", """stage2""", """stage3"""],
                    """embedding_dynamic_padding""": True,
                }
                self.backbone_config = BitConfig(**backbone_config )
            elif isinstance(backbone_config , dict ):
                logger.info('Initializing the config with a `BiT` backbone.' )
                self.backbone_config = BitConfig(**backbone_config )
            elif isinstance(backbone_config , PretrainedConfig ):
                self.backbone_config = backbone_config
            else:
                raise ValueError(
                    f'''backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.''' )
            self.backbone_featmap_shape = backbone_featmap_shape
            self.neck_ignore_stages = neck_ignore_stages
            if readout_type != "project":
                raise ValueError('Readout type must be \'project\' when using `DPT-hybrid` mode.' )
        else:
            self.backbone_config = None
            self.backbone_featmap_shape = None
            self.neck_ignore_stages = []
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.backbone_out_indices = backbone_out_indices
        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError('Readout_type must be one of [\'ignore\', \'add\', \'project\']' )
        self.readout_type = readout_type
        self.reassemble_factors = reassemble_factors
        self.neck_hidden_sizes = neck_hidden_sizes
        self.fusion_hidden_size = fusion_hidden_size
        self.head_in_index = head_in_index
        self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
        self.semantic_classifier_dropout = semantic_classifier_dropout
    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 106 |
"""simple docstring"""
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType(enum.Enum):
    '''simple docstring'''
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2
@add_end_docstrings(PIPELINE_INIT_ARGS)
class TextGenerationPipeline(Pipeline):
    '''simple docstring'''
_SCREAMING_SNAKE_CASE :Optional[Any] = """
In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
begging for his blessing. <eod> </s> <eos>
"""
def __init__( self , *_a , **_a ) -> Tuple:
"""simple docstring"""
super().__init__(*_a , **_a )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == """tf""" else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
SCREAMING_SNAKE_CASE__ : Any = None
if self.model.config.prefix is not None:
SCREAMING_SNAKE_CASE__ : List[str] = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = self._sanitize_parameters(prefix=_a , **self._forward_params )
SCREAMING_SNAKE_CASE__ : Optional[Any] = {**self._preprocess_params, **preprocess_params}
SCREAMING_SNAKE_CASE__ : Optional[Any] = {**self._forward_params, **forward_params}
    def _sanitize_parameters( self , return_full_text=None , return_tensors=None , return_text=None , return_type=None , clean_up_tokenization_spaces=None , prefix=None , handle_long_generation=None , stop_sequence=None , **generate_kwargs , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = {}
if prefix is not None:
SCREAMING_SNAKE_CASE__ : Dict = prefix
if prefix:
SCREAMING_SNAKE_CASE__ : Tuple = self.tokenizer(
_a , padding=_a , add_special_tokens=_a , return_tensors=self.framework )
SCREAMING_SNAKE_CASE__ : Tuple = prefix_inputs["""input_ids"""].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
f'''{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected'''
""" [None, 'hole']""" )
SCREAMING_SNAKE_CASE__ : int = handle_long_generation
preprocess_params.update(_a )
SCREAMING_SNAKE_CASE__ : Optional[int] = generate_kwargs
SCREAMING_SNAKE_CASE__ : int = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError("""`return_text` is mutually exclusive with `return_full_text`""" )
if return_tensors is not None:
raise ValueError("""`return_full_text` is mutually exclusive with `return_tensors`""" )
SCREAMING_SNAKE_CASE__ : List[Any] = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError("""`return_text` is mutually exclusive with `return_tensors`""" )
SCREAMING_SNAKE_CASE__ : Tuple = ReturnType.TENSORS
if return_type is not None:
SCREAMING_SNAKE_CASE__ : int = return_type
if clean_up_tokenization_spaces is not None:
SCREAMING_SNAKE_CASE__ : List[str] = clean_up_tokenization_spaces
if stop_sequence is not None:
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.tokenizer.encode(_a , add_special_tokens=_a )
if len(_a ) > 1:
warnings.warn(
"""Stopping on a multiple token sequence is not yet supported on transformers. The first token of"""
""" the stop sequence will be used as the stop sequence string in the interim.""" )
SCREAMING_SNAKE_CASE__ : List[Any] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
    def _parse_and_tokenize( self , *_a , **_a ) -> Any:
"""simple docstring"""
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({"""add_space_before_punct_symbol""": True} )
return super()._parse_and_tokenize(*_a , **_a )
def __call__( self , _a , **_a ) -> Optional[int]:
"""simple docstring"""
return super().__call__(_a , **_a )
def _a ( self , _a , _a="" , _a=None , **_a ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.tokenizer(
prefix + prompt_text , padding=_a , add_special_tokens=_a , return_tensors=self.framework )
SCREAMING_SNAKE_CASE__ : Tuple = prompt_text
if handle_long_generation == "hole":
SCREAMING_SNAKE_CASE__ : List[Any] = inputs["""input_ids"""].shape[-1]
if "max_new_tokens" in generate_kwargs:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = generate_kwargs["""max_new_tokens"""]
else:
SCREAMING_SNAKE_CASE__ : Tuple = generate_kwargs.get("""max_length""" , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError("""We cannot infer how many new tokens are expected""" )
if cur_len + new_tokens > self.tokenizer.model_max_length:
SCREAMING_SNAKE_CASE__ : str = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
"""We cannot use `hole` to handle this generation the number of desired tokens exceeds the"""
""" models max length""" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = inputs["""input_ids"""][:, -keep_length:]
if "attention_mask" in inputs:
SCREAMING_SNAKE_CASE__ : Optional[int] = inputs["""attention_mask"""][:, -keep_length:]
return inputs
    def _forward( self , model_inputs , **generate_kwargs ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model_inputs["""input_ids"""]
SCREAMING_SNAKE_CASE__ : Optional[int] = model_inputs.get("""attention_mask""" , _a )
# Allow empty prompts
if input_ids.shape[1] == 0:
SCREAMING_SNAKE_CASE__ : List[str] = None
SCREAMING_SNAKE_CASE__ : List[Any] = None
SCREAMING_SNAKE_CASE__ : List[str] = 1
else:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = input_ids.shape[0]
SCREAMING_SNAKE_CASE__ : Tuple = model_inputs.pop("""prompt_text""" )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
SCREAMING_SNAKE_CASE__ : Optional[int] = generate_kwargs.pop("""prefix_length""" , 0 )
if prefix_length > 0:
SCREAMING_SNAKE_CASE__ : List[str] = """max_new_tokens""" in generate_kwargs or (
"""generation_config""" in generate_kwargs
and generate_kwargs["""generation_config"""].max_new_tokens is not None
)
if not has_max_new_tokens:
SCREAMING_SNAKE_CASE__ : int = generate_kwargs.get("""max_length""" ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
SCREAMING_SNAKE_CASE__ : Dict = """min_new_tokens""" in generate_kwargs or (
"""generation_config""" in generate_kwargs
and generate_kwargs["""generation_config"""].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
SCREAMING_SNAKE_CASE__ : Tuple = self.model.generate(input_ids=_a , attention_mask=_a , **_a )
SCREAMING_SNAKE_CASE__ : Optional[int] = generated_sequence.shape[0]
if self.framework == "pt":
SCREAMING_SNAKE_CASE__ : str = generated_sequence.reshape(_a , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tf.reshape(_a , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
    def postprocess( self , model_outputs , return_type=ReturnType.FULL_TEXT , clean_up_tokenization_spaces=True ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = model_outputs["""generated_sequence"""][0]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model_outputs["""input_ids"""]
SCREAMING_SNAKE_CASE__ : str = model_outputs["""prompt_text"""]
SCREAMING_SNAKE_CASE__ : Any = generated_sequence.numpy().tolist()
SCREAMING_SNAKE_CASE__ : List[Any] = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
SCREAMING_SNAKE_CASE__ : Tuple = {"""generated_token_ids""": sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.tokenizer.decode(
_a , skip_special_tokens=_a , clean_up_tokenization_spaces=_a , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
SCREAMING_SNAKE_CASE__ : Dict = 0
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=_a , clean_up_tokenization_spaces=_a , ) )
if return_type == ReturnType.FULL_TEXT:
SCREAMING_SNAKE_CASE__ : Tuple = prompt_text + text[prompt_length:]
else:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = text[prompt_length:]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {"""generated_text""": all_text}
records.append(_a )
return records
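# Usage sketch (added; illustrative, not part of the original module): this class
# backs the "text-generation" task in transformers, so typical use goes through
# the pipeline factory rather than direct construction:
#
#     from transformers import pipeline
#     generator = pipeline("text-generation", model="gpt2")
#     print(generator("Hello, I'm a language model,", max_new_tokens=20)[0]["generated_text"])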
| 680 | 0 |
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("""socket.socket""" )
@patch("""builtins.open""" )
def test_send_file_running_as_expected(file, sock):
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None])
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f)
# ===== invoke =====
send_file(filename="""mytext.txt""", testing=lowerCamelCase )
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
| 681 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    def __init__( self , parent , batch_size=3 , image_size=32 , num_channels=3 , embeddings_size=10 , hidden_sizes=[10, 20, 30, 40] , depths=[1, 1, 2, 1] , is_training=True , use_labels=True , hidden_act="relu" , num_labels=3 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths )
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
    def create_and_check_model( self , config , pixel_values , labels ):
        model = TFResNetModel(config=config )
        result = model(pixel_values )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config )
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_tf
class TFResNetModelTest(TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase):
    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"""feature-extraction""": TFResNetModel, """image-classification""": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    has_attentions = False
    def setUp( self ):
        self.model_tester = TFResNetModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ResNetConfig , has_text_modality=False )
    def test_config( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self ):
        return
    @unittest.skip(reason="""ResNet does not use inputs_embeds""" )
    def test_inputs_embeds( self ):
        pass
    @unittest.skip(reason="""ResNet does not support input and output embeddings""" )
    def test_model_common_attributes( self ):
        pass
    def test_forward_signature( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_hidden_states_output( self ):
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )
            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["""basic""", """bottleneck"""]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["""output_hidden_states"""] = True
                check_hidden_states_output(inputs_dict , config , model_class )
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict , config , model_class )
    def test_for_image_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_tf
@require_vision
class TFResNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor( self ):
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_image_classification_head( self ):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""tf""" )
        # forward pass
        outputs = model(**inputs )
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = tf.constant([-11.1_069, -9.7_877, -8.3_777] )
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , expected_slice , atol=1e-4 ) )
| 681 | 1 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)
FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
[
("""audio-spectrogram-transformer""", """ASTFeatureExtractor"""),
("""beit""", """BeitFeatureExtractor"""),
("""chinese_clip""", """ChineseCLIPFeatureExtractor"""),
("""clap""", """ClapFeatureExtractor"""),
("""clip""", """CLIPFeatureExtractor"""),
("""clipseg""", """ViTFeatureExtractor"""),
("""conditional_detr""", """ConditionalDetrFeatureExtractor"""),
("""convnext""", """ConvNextFeatureExtractor"""),
("""cvt""", """ConvNextFeatureExtractor"""),
("""data2vec-audio""", """Wav2Vec2FeatureExtractor"""),
("""data2vec-vision""", """BeitFeatureExtractor"""),
("""deformable_detr""", """DeformableDetrFeatureExtractor"""),
("""deit""", """DeiTFeatureExtractor"""),
("""detr""", """DetrFeatureExtractor"""),
("""dinat""", """ViTFeatureExtractor"""),
("""donut-swin""", """DonutFeatureExtractor"""),
("""dpt""", """DPTFeatureExtractor"""),
("""encodec""", """EncodecFeatureExtractor"""),
("""flava""", """FlavaFeatureExtractor"""),
("""glpn""", """GLPNFeatureExtractor"""),
("""groupvit""", """CLIPFeatureExtractor"""),
("""hubert""", """Wav2Vec2FeatureExtractor"""),
("""imagegpt""", """ImageGPTFeatureExtractor"""),
("""layoutlmv2""", """LayoutLMv2FeatureExtractor"""),
("""layoutlmv3""", """LayoutLMv3FeatureExtractor"""),
("""levit""", """LevitFeatureExtractor"""),
("""maskformer""", """MaskFormerFeatureExtractor"""),
("""mctct""", """MCTCTFeatureExtractor"""),
("""mobilenet_v1""", """MobileNetV1FeatureExtractor"""),
("""mobilenet_v2""", """MobileNetV2FeatureExtractor"""),
("""mobilevit""", """MobileViTFeatureExtractor"""),
("""nat""", """ViTFeatureExtractor"""),
("""owlvit""", """OwlViTFeatureExtractor"""),
("""perceiver""", """PerceiverFeatureExtractor"""),
("""poolformer""", """PoolFormerFeatureExtractor"""),
("""regnet""", """ConvNextFeatureExtractor"""),
("""resnet""", """ConvNextFeatureExtractor"""),
("""segformer""", """SegformerFeatureExtractor"""),
("""sew""", """Wav2Vec2FeatureExtractor"""),
("""sew-d""", """Wav2Vec2FeatureExtractor"""),
("""speech_to_text""", """Speech2TextFeatureExtractor"""),
("""speecht5""", """SpeechT5FeatureExtractor"""),
("""swiftformer""", """ViTFeatureExtractor"""),
("""swin""", """ViTFeatureExtractor"""),
("""swinv2""", """ViTFeatureExtractor"""),
("""table-transformer""", """DetrFeatureExtractor"""),
("""timesformer""", """VideoMAEFeatureExtractor"""),
("""tvlt""", """TvltFeatureExtractor"""),
("""unispeech""", """Wav2Vec2FeatureExtractor"""),
("""unispeech-sat""", """Wav2Vec2FeatureExtractor"""),
("""van""", """ConvNextFeatureExtractor"""),
("""videomae""", """VideoMAEFeatureExtractor"""),
("""vilt""", """ViltFeatureExtractor"""),
("""vit""", """ViTFeatureExtractor"""),
("""vit_mae""", """ViTFeatureExtractor"""),
("""vit_msn""", """ViTFeatureExtractor"""),
("""wav2vec2""", """Wav2Vec2FeatureExtractor"""),
("""wav2vec2-conformer""", """Wav2Vec2FeatureExtractor"""),
("""wavlm""", """Wav2Vec2FeatureExtractor"""),
("""whisper""", """WhisperFeatureExtractor"""),
("""xclip""", """CLIPFeatureExtractor"""),
("""yolos""", """YolosFeatureExtractor"""),
]
)
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name( class_name ):
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name )
            module = importlib.import_module(F'''.{module_name}''', """transformers.models""" )
            try:
                return getattr(module, class_name )
            except AttributeError:
                continue
    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, """__name__""", None ) == class_name:
            return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("""transformers""" )
    if hasattr(main_module, class_name ):
        return getattr(main_module, class_name )
    return None
def get_feature_extractor_config( pretrained_model_name_or_path, cache_dir = None, force_download = False, resume_download = False, proxies = None, use_auth_token = None, revision = None, local_files_only = False, **kwargs, ):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path, FEATURE_EXTRACTOR_NAME, cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision, local_files_only=local_files_only, )
    if resolved_config_file is None:
        logger.info(
            """Could not locate the feature extractor configuration file, will try to use the model config instead.""" )
        return {}
    with open(resolved_config_file, encoding="""utf-8""" ) as reader:
        return json.load(reader )
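# Usage sketch (added; the model id is illustrative): fetch just the
# preprocessor config for a checkpoint without instantiating anything:
#
#     config_dict = get_feature_extractor_config("facebook/wav2vec2-base-960h")
#     print(config_dict.get("feature_extractor_type"))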
class AutoFeatureExtractor:
    def __init__( self ):
        raise EnvironmentError(
            """AutoFeatureExtractor is designed to be instantiated """
            """using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.""" )
    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES )
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        config = kwargs.pop("""config""" , None )
        trust_remote_code = kwargs.pop("""trust_remote_code""" , None )
        kwargs["""_from_auto"""] = True
        config_dict , _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path , **kwargs )
        feature_extractor_class = config_dict.get("""feature_extractor_type""" , None )
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get("""auto_map""" , {} ):
            feature_extractor_auto_map = config_dict["""auto_map"""]["""AutoFeatureExtractor"""]
        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config , PretrainedConfig ):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path , **kwargs )
            # It could be in `config.feature_extractor_type``
            feature_extractor_class = getattr(config , """feature_extractor_type""" , None )
            if hasattr(config , """auto_map""" ) and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map["""AutoFeatureExtractor"""]
        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class )
        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config ) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code , pretrained_model_name_or_path , has_local_code , has_remote_code )
        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map , pretrained_model_name_or_path , **kwargs )
            _ = kwargs.pop("""code_revision""" , None )
            if os.path.isdir(pretrained_model_name_or_path ):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict , **kwargs )
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict , **kwargs )
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config ) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config )]
            return feature_extractor_class.from_dict(config_dict , **kwargs )
raise ValueError(
F'''Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a '''
F'''`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following '''
F'''`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}''' )
    @staticmethod
    def register( config_class , feature_extractor_class ):
        FEATURE_EXTRACTOR_MAPPING.register(config_class , feature_extractor_class )
| 681 |
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path ):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file )
    print(F'''Building PyTorch model from configuration: {config}''' )
    model = MobileBertForPreTraining(config )
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path )
    # Save pytorch-model
    print(F'''Save PyTorch model to {pytorch_dump_path}''' )
    torch.save(model.state_dict(), pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--mobilebert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained MobileBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
| 681 | 1 |
from math import factorial, pi
def maclaurin_sin( theta, accuracy = 30 ):
    if not isinstance(theta, (int, float) ):
        raise ValueError("""maclaurin_sin() requires either an int or float for theta""" )
    if not isinstance(accuracy, int ) or accuracy <= 0:
        raise ValueError("""maclaurin_sin() requires a positive int for accuracy""" )
    theta = float(theta )
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1 ) for r in range(accuracy ) )
def maclaurin_cos( theta, accuracy = 30 ):
    if not isinstance(theta, (int, float) ):
        raise ValueError("""maclaurin_cos() requires either an int or float for theta""" )
    if not isinstance(accuracy, int ) or accuracy <= 0:
        raise ValueError("""maclaurin_cos() requires a positive int for accuracy""" )
    theta = float(theta )
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r ) for r in range(accuracy ) )
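# Note (added): both series reduce theta modulo 2*pi before summing, so the
# 30-term truncation stays accurate even for large angles; without the
# reduction, theta**n would dominate the factorial denominators for small n.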
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(1_0))
print(maclaurin_sin(-1_0))
print(maclaurin_sin(1_0, 1_5))
print(maclaurin_sin(-1_0, 1_5))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(1_0, 1_5))
print(maclaurin_cos(-1_0, 1_5))
| 681 |
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key( name ):
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace("""img_encoder.pos_embed""", """vision_model.embeddings.position_embeddings""" )
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace("""img_encoder.patch_embed.proj""", """vision_model.embeddings.patch_embeddings.projection""" )
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace("""img_encoder.patch_embed.norm""", """vision_model.embeddings.layernorm""" )
    if "img_encoder.layers" in name:
        name = name.replace("""img_encoder.layers""", """vision_model.encoder.stages""" )
    if "blocks" in name and "res" not in name:
        name = name.replace("""blocks""", """layers""" )
    if "attn" in name and "pre_assign" not in name:
        name = name.replace("""attn""", """self_attn""" )
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace("""proj""", """out_proj""" )
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace("""pre_assign_attn.attn.proj""", """pre_assign_attn.attn.out_proj""" )
    if "norm1" in name:
        name = name.replace("""norm1""", """layer_norm1""" )
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace("""norm2""", """layer_norm2""" )
    if "img_encoder.norm" in name:
        name = name.replace("""img_encoder.norm""", """vision_model.layernorm""" )
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace("""text_encoder.token_embedding""", """text_model.embeddings.token_embedding""" )
    if "text_encoder.positional_embedding" in name:
        name = name.replace("""text_encoder.positional_embedding""", """text_model.embeddings.position_embedding.weight""" )
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace("""text_encoder.transformer.resblocks.""", """text_model.encoder.layers.""" )
    if "ln_1" in name:
        name = name.replace("""ln_1""", """layer_norm1""" )
    if "ln_2" in name:
        name = name.replace("""ln_2""", """layer_norm2""" )
    if "c_fc" in name:
        name = name.replace("""c_fc""", """fc1""" )
    if "c_proj" in name:
        name = name.replace("""c_proj""", """fc2""" )
    if "text_encoder" in name:
        name = name.replace("""text_encoder""", """text_model""" )
    if "ln_final" in name:
        name = name.replace("""ln_final""", """final_layer_norm""" )
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace("""img_projector.linear_hidden.""", """visual_projection.""" )
    if "img_projector.linear_out." in name:
        name = name.replace("""img_projector.linear_out.""", """visual_projection.3.""" )
    if "text_projector.linear_hidden" in name:
        name = name.replace("""text_projector.linear_hidden""", """text_projection""" )
    if "text_projector.linear_out" in name:
        name = name.replace("""text_projector.linear_out""", """text_projection.3""" )
    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            stage_num, layer_num = int(key_split[2]), int(key_split[4])
            dim = config.vision_config.hidden_size
            if "weight" in key:
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.bias"
                ] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.text_config.hidden_size
            if "weight" in key:
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        else:
            new_name = rename_key(key)
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val
    return orig_state_dict
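

def _demo_qkv_split(dim=4):
    # Illustrative sketch (an aside, not used by the conversion): the "qkv" branch above
    # is the standard split of a fused (3*dim, dim) projection into query/key/value.
    qkv = torch.randn(3 * dim, dim)
    q, k, v = qkv[:dim, :], qkv[dim : dim * 2, :], qkv[-dim:, :]
    assert torch.equal(torch.cat([q, k, v], dim=0), qkv)
    return q, k, v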
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_groupvit_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, model_name="groupvit-gcc-yfcc", push_to_hub=False
):
    config = GroupViTConfig()
    model = GroupViTModel(config).eval()

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys) == 0)

    # verify result
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = prepare_img()
    inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)

    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3523, 6.3629]])
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1873, 8.6230]])
    else:
        raise ValueError(f"Model name {model_name} not supported.")
    assert torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3)

    processor.save_pretrained(pytorch_dump_folder_path)
    model.save_pretrained(pytorch_dump_folder_path)
    print("Successfully saved processor and model to", pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        processor.push_to_hub(model_name, organization="nielsr")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to dump the processor and PyTorch model."
    )
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to GroupViT checkpoint")
    parser.add_argument(
        "--model_name",
        default="groupvit-gcc-yfcc",
        type=str,
        help="Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.",
    )
    args = parser.parse_args()
    convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 681 | 1 |
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a, input_b):
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset, value_array):
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)

    answer = []
    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist])

    return answer


def cosine_similarity(input_a, input_b):
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))
if __name__ == "__main__":
import doctest
doctest.testmod()
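
    # Usage sketch (illustrative): nearest neighbour plus cosine similarity on toy data.
    dataset = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
    queries = np.array([[0.9, 1.2]])
    print(similarity_search(dataset, queries))  # [[[1.0, 1.0], 0.2236...]]
    print(cosine_similarity(np.array([1.0, 2.0]), np.array([2.0, 4.0])))  # 1.0 for parallel vectors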
| 681 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class KarrasVeSchedulerState:
    # setable values
    num_inference_steps: Optional[int] = None
    timesteps: Optional[jnp.ndarray] = None
    schedule: Optional[jnp.ndarray] = None  # sigma(t_i)

    @classmethod
    def create(cls):
        return cls()


@dataclass
class FlaxKarrasVeOutput(BaseOutput):
    prev_sample: jnp.ndarray
    derivative: jnp.ndarray
    state: KarrasVeSchedulerState


class FlaxKarrasVeScheduler(FlaxSchedulerMixin, ConfigMixin):
    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        pass

    def create_state(self):
        return KarrasVeSchedulerState.create()

    def set_timesteps(self, state, num_inference_steps, shape=()):
        timesteps = jnp.arange(0, num_inference_steps)[::-1].copy()
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]
        return state.replace(
            num_inference_steps=num_inference_steps,
            schedule=jnp.array(schedule, dtype=jnp.float32),
            timesteps=timesteps,
        )

    def add_noise_to_input(self, state, sample, sigma, key):
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        key = random.split(key, num=1)
        eps = self.config.s_noise * random.normal(key=key, shape=sample.shape)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step(self, state, model_output, sigma_hat, sigma_prev, sample_hat, return_dict=True):
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def step_correct(
        self, state, model_output, sigma_hat, sigma_prev, sample_hat, sample_prev, derivative, return_dict=True
    ):
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def add_noise(self, state, original_samples, noise, timesteps):
        raise NotImplementedError()
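

# Illustrative aside (a sketch, not part of the scheduler API): the geometric sigma ramp
# built in `set_timesteps` interpolates sigma^2 between sigma_max^2 and sigma_min^2 in log space.
def _karras_sigma_ramp_demo(sigma_min=0.02, sigma_max=100.0, n=8):
    exponents = jnp.linspace(0.0, 1.0, n)
    sigmas_sq = sigma_max**2 * (sigma_min**2 / sigma_max**2) ** exponents
    return jnp.sqrt(sigmas_sq)  # decreases geometrically from sigma_max to sigma_min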
| 681 | 1 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 681 |
from itertools import product
from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros


def gen_gaussian_kernel(k_size, sigma):
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g


def gaussian_filter(image, k_size, sigma):
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1

    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1

    # turn the kernel into shape (k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)

    # reshape and get the dst image
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8)

    return dst


if __name__ == "__main__":
    # read original image
    img = imread(r"../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # get values with two different mask size
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)

    # show result images
    imshow("gaussian filter with 3x3 mask", gaussian3x3)
    imshow("gaussian filter with 5x5 mask", gaussian5x5)
    waitKey()
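
    # Synthetic-input sketch (illustrative, so the demo also runs without the lena asset):
    # smoothing should reduce the variance of added noise.
    from numpy.random import default_rng

    noisy = (128 + 40 * default_rng(0).standard_normal((32, 32))).clip(0, 255).astype(uint8)
    print(noisy.std(), gaussian_filter(noisy, 5, sigma=1).std())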
| 681 | 1 |
def solution(n: int = 1000) -> int:
    """Returns the sum of all natural numbers below n that are multiples of 3 or 5."""
    return sum(e for e in range(3, n) if e % 3 == 0 or e % 5 == 0)


if __name__ == "__main__":
    print(f"{solution() = }")
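

def solution_closed_form(n: int = 1000) -> int:
    # Illustrative alternative (a sketch): inclusion-exclusion over the three arithmetic
    # series gives the same answer in O(1).
    def series_sum(k: int) -> int:
        m = (n - 1) // k  # number of positive multiples of k below n
        return k * m * (m + 1) // 2

    return series_sum(3) + series_sum(5) - series_sum(15)


assert solution_closed_form() == solution() == 233168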
| 681 |
import pytest
_lowerCamelCase ="""__dummy_dataset1__"""
_lowerCamelCase ="""
import json
import os
import datasets
REPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"
URLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}
class __DummyDataset1__(datasets.GeneratorBasedBuilder):
def _info(self):
features = datasets.Features(
{
\"tokens\": datasets.Sequence(datasets.Value(\"string\")),
\"ner_tags\": datasets.Sequence(
datasets.features.ClassLabel(
names=[
\"O\",
\"B-PER\",
\"I-PER\",
\"B-ORG\",
\"I-ORG\",
\"B-LOC\",
\"I-LOC\",
]
)
),
\"langs\": datasets.Sequence(datasets.Value(\"string\")),
\"spans\": datasets.Sequence(datasets.Value(\"string\")),
}
)
return datasets.DatasetInfo(features=features)
def _split_generators(self, dl_manager):
dl_path = dl_manager.download(URLS)
return [
datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),
datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),
]
def _generate_examples(self, filepath):
with open(filepath, \"r\", encoding=\"utf-8\") as f:
for i, line in enumerate(f):
yield i, json.loads(line)
"""
@pytest.fixture
def dataset_loading_script_name():
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code():
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
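

# Usage sketch (illustrative; `test_dummy_dataset1_loads` is not one of the original
# fixtures and assumes the 🤗 `datasets` library is installed):
def test_dummy_dataset1_loads(dataset_loading_script_dir):
    from datasets import load_dataset

    ds = load_dataset(dataset_loading_script_dir, split="train")
    assert "tokens" in ds.column_names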
| 681 | 1 |
import argparse
import os
import re


PATH_TO_TRANSFORMERS = "src/transformers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":` and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")


def get_indent(line):
    """Returns the indent in `line`."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]


def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """Split `code` into its indented blocks, starting at `indent_level`."""
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks


def ignore_underscore(key):
    """Wraps a `key` function (mapping an object to a string) to lowercase and ignore underscores."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner


def sort_objects(objects, key=None):
    """Sort a list of `objects` following the rules of isort; `key` optionally maps an object to a str."""

    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)


def sort_objects_in_import(import_statement):
    """Return the same `import_statement` but with objects properly sorted."""

    # This inner function sort imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement


def sort_imports(file, check_only=True):
    """Sort `_import_structure` imports in `file`; `check_only` determines whether we only check or overwrite."""
    with open(file, encoding="utf-8") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reorderded_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reorderded_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reorderded_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w", encoding="utf-8") as f:
                f.write("\n".join(main_blocks))


def sort_imports_in_all_inits(check_only=True):
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures += [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
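
    # Illustrative aside: the ordering rule implemented by `sort_objects` -- constants
    # first, then classes, then functions, each alphabetized ignoring underscores:
    #
    #     sort_objects(["zeta_fn", "MY_CONST", "Alpha", "_private_fn", "BETA", "Gamma"])
    #     -> ['BETA', 'MY_CONST', 'Alpha', 'Gamma', '_private_fn', 'zeta_fn']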
| 681 |
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("""9.1.0"""):
_lowerCamelCase ={
"""linear""": PIL.Image.Resampling.BILINEAR,
"""bilinear""": PIL.Image.Resampling.BILINEAR,
"""bicubic""": PIL.Image.Resampling.BICUBIC,
"""lanczos""": PIL.Image.Resampling.LANCZOS,
"""nearest""": PIL.Image.Resampling.NEAREST,
}
else:
_lowerCamelCase ={
"""linear""": PIL.Image.LINEAR,
"""bilinear""": PIL.Image.BILINEAR,
"""bicubic""": PIL.Image.BICUBIC,
"""lanczos""": PIL.Image.LANCZOS,
"""nearest""": PIL.Image.NEAREST,
}
def pt_to_pil(images):
    """Convert a torch image tensor in [-1, 1] to a list of PIL images."""
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    """Convert a numpy image batch in [0, 1] to a list of PIL images."""
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
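

# Usage sketch (illustrative; assumes torch is installed, which this module itself
# does not require):
#
#     import torch
#     batch = torch.rand(2, 3, 64, 64) * 2 - 1   # NCHW in [-1, 1]
#     pils = pt_to_pil(batch)
#     print(len(pils), pils[0].size)             # 2 (64, 64)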
| 681 | 1 |
from typing import Optional
from torch import nn
from .transformer_2d import Transformer2DModel, Transformer2DModelOutput


class DualTransformer2DModel(nn.Module):
    def __init__(self, num_attention_heads=16, attention_head_dim=88, in_channels=None, num_layers=1, dropout=0.0, norm_num_groups=32, cross_attention_dim=None, attention_bias=False, sample_size=None, num_vector_embeds=None, activation_fn="geglu", num_embeds_ada_norm=None):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim, in_channels=in_channels, num_layers=num_layers, dropout=dropout, norm_num_groups=norm_num_groups, cross_attention_dim=cross_attention_dim, attention_bias=attention_bias, sample_size=sample_size, num_vector_embeds=num_vector_embeds, activation_fn=activation_fn, num_embeds_ada_norm=num_embeds_ada_norm)
                for _ in range(2)
            ]
        )
        # Variables that can be set by a pipeline:
        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5
        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]
        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward(self, hidden_states, encoder_hidden_states, timestep=None, attention_mask=None, cross_attention_kwargs=None, return_dict=True):
        input_states = hidden_states
        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                hidden_states, encoder_hidden_states=condition_state, timestep=timestep, cross_attention_kwargs=cross_attention_kwargs, return_dict=False,
            )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]
        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states
        if not return_dict:
            return (output_states,)
        return Transformer2DModelOutput(sample=output_states)
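
# Illustrative aside: the blend at the end of `forward` is a plain convex combination of
# the two residual branches before the input is added back, e.g. with mix_ratio = 0.5:
#
#     output_states = r0 * 0.5 + r1 * 0.5 + input_states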
| 681 | 1 |
def solution(power: int = 1000) -> int:
    """Returns the sum of the digits of 2**power."""
    num = 2**power
    string_num = str(num)
    list_num = list(string_num)
    sum_of_num = 0
    for i in list_num:
        sum_of_num += int(i)
    return sum_of_num


if __name__ == "__main__":
    power = int(input("Enter the power of 2: ").strip())
    print("2 ^ ", power, " = ", 2**power)
    result = solution(power)
    print("Sum of the digits is: ", result)
| 681 |
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCamelCase ="""▁"""
_lowerCamelCase =get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
class BertGenerationTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertGenerationTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 1002)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_full_tokenizer(self):
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
    @cached_property
    def big_tokenizer(self):
        return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [18536, 2260, 101]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
8_7_1,
4_1_9,
3_5_8,
9_4_6,
9_9_1,
2_5_2_1,
4_5_2,
3_5_8,
1_3_5_7,
3_8_7,
7_7_5_1,
3_5_3_6,
1_1_2,
9_8_5,
4_5_6,
1_2_6,
8_6_5,
9_3_8,
5_4_0_0,
5_7_3_4,
4_5_8,
1_3_6_8,
4_6_7,
7_8_6,
2_4_6_2,
5_2_4_6,
1_1_5_9,
6_3_3,
8_6_5,
4_5_1_9,
4_5_7,
5_8_2,
8_5_2,
2_5_5_7,
4_2_7,
9_1_6,
5_0_8,
4_0_5,
3_4_3_2_4,
4_9_7,
3_9_1,
4_0_8,
1_1_3_4_2,
1_2_4_4,
3_8_5,
1_0_0,
9_3_8,
9_8_5,
4_5_6,
5_7_4,
3_6_2,
1_2_5_9_7,
3_2_0_0,
3_1_2_9,
1_1_7_2,
]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BertGenerationConfig, BertGenerationEncoder

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False
        )
        config = BertGenerationConfig()
        model = BertGenerationEncoder(config)

        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
@slow
def UpperCamelCase__ ( self ):
# fmt: off
lowerCamelCase : Any = {"""input_ids""": [[3_9_2_8_6, 4_5_8, 3_6_3_3_5, 2_0_0_1, 4_5_6, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 7_7_4_6, 1_7_4_1, 1_1_1_5_7, 3_9_1, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 3_9_6_7, 3_5_4_1_2, 1_1_3, 4_9_3_6, 1_0_9, 3_8_7_0, 2_3_7_7, 1_1_3, 3_0_0_8_4, 4_5_7_2_0, 4_5_8, 1_3_4, 1_7_4_9_6, 1_1_2, 5_0_3, 1_1_6_7_2, 1_1_3, 1_1_8, 1_1_2, 5_6_6_5, 1_3_3_4_7, 3_8_6_8_7, 1_1_2, 1_4_9_6, 3_1_3_8_9, 1_1_2, 3_2_6_8, 4_7_2_6_4, 1_3_4, 9_6_2, 1_1_2, 1_6_3_7_7, 8_0_3_5, 2_3_1_3_0, 4_3_0, 1_2_1_6_9, 1_5_5_1_8, 2_8_5_9_2, 4_5_8, 1_4_6, 4_1_6_9_7, 1_0_9, 3_9_1, 1_2_1_6_9, 1_5_5_1_8, 1_6_6_8_9, 4_5_8, 1_4_6, 4_1_3_5_8, 1_0_9, 4_5_2, 7_2_6, 4_0_3_4, 1_1_1, 7_6_3, 3_5_4_1_2, 5_0_8_2, 3_8_8, 1_9_0_3, 1_1_1, 9_0_5_1, 3_9_1, 2_8_7_0, 4_8_9_1_8, 1_9_0_0, 1_1_2_3, 5_5_0, 9_9_8, 1_1_2, 9_5_8_6, 1_5_9_8_5, 4_5_5, 3_9_1, 4_1_0, 2_2_9_5_5, 3_7_6_3_6, 1_1_4], [4_4_8, 1_7_4_9_6, 4_1_9, 3_6_6_3, 3_8_5, 7_6_3, 1_1_3, 2_7_5_3_3, 2_8_7_0, 3_2_8_3, 1_3_0_4_3, 1_6_3_9, 2_4_7_1_3, 5_2_3, 6_5_6, 2_4_0_1_3, 1_8_5_5_0, 2_5_2_1, 5_1_7, 2_7_0_1_4, 2_1_2_4_4, 4_2_0, 1_2_1_2, 1_4_6_5, 3_9_1, 9_2_7, 4_8_3_3, 3_8_8, 5_7_8, 1_1_7_8_6, 1_1_4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_8_4, 2_1_6_9, 7_6_8_7, 2_1_9_3_2, 1_8_1_4_6, 7_2_6, 3_6_3, 1_7_0_3_2, 3_3_9_1, 1_1_4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__magic_name__ , model_name="""google/bert_for_seq_generation_L-24_bbc_encoder""" , revision="""c817d1fd1be2ffa69431227a1fe320544943d4db""" , )
| 681 | 1 |
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class A__ ( nn.Module):
def __init__( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=0.0 , __magic_name__ = None , __magic_name__ = "geglu" , __magic_name__ = None , __magic_name__ = False , __magic_name__ = False , __magic_name__ = False , __magic_name__ = False , __magic_name__ = True , __magic_name__ = "layer_norm" , __magic_name__ = False , ):
super().__init__()
lowerCamelCase : Optional[Any] = only_cross_attention
lowerCamelCase : Dict = (num_embeds_ada_norm is not None) and norm_type == """ada_norm_zero"""
lowerCamelCase : List[Any] = (num_embeds_ada_norm is not None) and norm_type == """ada_norm"""
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
F'''`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to'''
F''' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.''' )
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
lowerCamelCase : Dict = AdaLayerNorm(__magic_name__ , __magic_name__ )
elif self.use_ada_layer_norm_zero:
lowerCamelCase : List[Any] = AdaLayerNormZero(__magic_name__ , __magic_name__ )
else:
lowerCamelCase : Optional[Any] = nn.LayerNorm(__magic_name__ , elementwise_affine=__magic_name__ )
lowerCamelCase : Tuple = Attention(
query_dim=__magic_name__ , heads=__magic_name__ , dim_head=__magic_name__ , dropout=__magic_name__ , bias=__magic_name__ , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=__magic_name__ , )
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
# I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
# the second cross attention block.
lowerCamelCase : List[Any] = (
AdaLayerNorm(__magic_name__ , __magic_name__ )
if self.use_ada_layer_norm
else nn.LayerNorm(__magic_name__ , elementwise_affine=__magic_name__ )
)
lowerCamelCase : str = Attention(
query_dim=__magic_name__ , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=__magic_name__ , dim_head=__magic_name__ , dropout=__magic_name__ , bias=__magic_name__ , upcast_attention=__magic_name__ , ) # is self-attn if encoder_hidden_states is none
else:
lowerCamelCase : Union[str, Any] = None
lowerCamelCase : int = None
# 3. Feed-forward
lowerCamelCase : Tuple = nn.LayerNorm(__magic_name__ , elementwise_affine=__magic_name__ )
lowerCamelCase : str = FeedForward(__magic_name__ , dropout=__magic_name__ , activation_fn=__magic_name__ , final_dropout=__magic_name__ )
# let chunk size default to None
lowerCamelCase : Union[str, Any] = None
lowerCamelCase : Optional[int] = 0
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ ):
# Sets chunk feed-forward
lowerCamelCase : List[Any] = chunk_size
lowerCamelCase : Optional[int] = dim
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , ):
# Notice that normalization is always applied before the real computation in the following blocks.
# 1. Self-Attention
if self.use_ada_layer_norm:
lowerCamelCase : Union[str, Any] = self.norma(__magic_name__ , __magic_name__ )
elif self.use_ada_layer_norm_zero:
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : int = self.norma(
__magic_name__ , __magic_name__ , __magic_name__ , hidden_dtype=hidden_states.dtype )
else:
lowerCamelCase : List[Any] = self.norma(__magic_name__ )
lowerCamelCase : str = cross_attention_kwargs if cross_attention_kwargs is not None else {}
lowerCamelCase : List[Any] = self.attna(
__magic_name__ , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=__magic_name__ , **__magic_name__ , )
if self.use_ada_layer_norm_zero:
lowerCamelCase : List[str] = gate_msa.unsqueeze(1 ) * attn_output
lowerCamelCase : List[str] = attn_output + hidden_states
# 2. Cross-Attention
if self.attna is not None:
lowerCamelCase : Union[str, Any] = (
self.norma(__magic_name__ , __magic_name__ ) if self.use_ada_layer_norm else self.norma(__magic_name__ )
)
lowerCamelCase : List[str] = self.attna(
__magic_name__ , encoder_hidden_states=__magic_name__ , attention_mask=__magic_name__ , **__magic_name__ , )
lowerCamelCase : Optional[Any] = attn_output + hidden_states
# 3. Feed-forward
lowerCamelCase : Any = self.norma(__magic_name__ )
if self.use_ada_layer_norm_zero:
lowerCamelCase : List[Any] = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
F'''`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.''' )
lowerCamelCase : Union[str, Any] = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
lowerCamelCase : Dict = torch.cat(
[self.ff(__magic_name__ ) for hid_slice in norm_hidden_states.chunk(__magic_name__ , dim=self._chunk_dim )] , dim=self._chunk_dim , )
else:
lowerCamelCase : Union[str, Any] = self.ff(__magic_name__ )
if self.use_ada_layer_norm_zero:
lowerCamelCase : str = gate_mlp.unsqueeze(1 ) * ff_output
lowerCamelCase : List[Any] = ff_output + hidden_states
return hidden_states
class A__ ( nn.Module):
def __init__( self , __magic_name__ , __magic_name__ = None , __magic_name__ = 4 , __magic_name__ = 0.0 , __magic_name__ = "geglu" , __magic_name__ = False , ):
super().__init__()
lowerCamelCase : Dict = int(dim * mult )
lowerCamelCase : Dict = dim_out if dim_out is not None else dim
if activation_fn == "gelu":
lowerCamelCase : List[Any] = GELU(__magic_name__ , __magic_name__ )
if activation_fn == "gelu-approximate":
lowerCamelCase : Dict = GELU(__magic_name__ , __magic_name__ , approximate="""tanh""" )
elif activation_fn == "geglu":
lowerCamelCase : Tuple = GEGLU(__magic_name__ , __magic_name__ )
elif activation_fn == "geglu-approximate":
lowerCamelCase : List[str] = ApproximateGELU(__magic_name__ , __magic_name__ )
lowerCamelCase : List[Any] = nn.ModuleList([] )
# project in
self.net.append(__magic_name__ )
# project dropout
self.net.append(nn.Dropout(__magic_name__ ) )
# project out
self.net.append(nn.Linear(__magic_name__ , __magic_name__ ) )
# FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
if final_dropout:
self.net.append(nn.Dropout(__magic_name__ ) )
def UpperCamelCase__ ( self , __magic_name__ ):
for module in self.net:
lowerCamelCase : Optional[Any] = module(__magic_name__ )
return hidden_states
class A__ ( nn.Module):
def __init__( self , __magic_name__ , __magic_name__ , __magic_name__ = "none" ):
super().__init__()
lowerCamelCase : List[str] = nn.Linear(__magic_name__ , __magic_name__ )
lowerCamelCase : Union[str, Any] = approximate
def UpperCamelCase__ ( self , __magic_name__ ):
if gate.device.type != "mps":
return F.gelu(__magic_name__ , approximate=self.approximate )
# mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32), approximate=self.approximate).to(dtype=gate.dtype)
def UpperCamelCase__ ( self , __magic_name__ ):
lowerCamelCase : Optional[int] = self.proj(__magic_name__ )
lowerCamelCase : List[str] = self.gelu(__magic_name__ )
return hidden_states
class A__ ( nn.Module):
def __init__( self , __magic_name__ , __magic_name__ ):
super().__init__()
lowerCamelCase : Any = nn.Linear(__magic_name__ , dim_out * 2 )
def UpperCamelCase__ ( self , __magic_name__ ):
if gate.device.type != "mps":
return F.gelu(__magic_name__ )
# mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32)).to(dtype=gate.dtype)
def UpperCamelCase__ ( self , __magic_name__ ):
lowerCamelCase , lowerCamelCase : List[Any] = self.proj(__magic_name__ ).chunk(2 , dim=-1 )
return hidden_states * self.gelu(__magic_name__ )
class A__ ( nn.Module):
def __init__( self , __magic_name__ , __magic_name__ ):
super().__init__()
lowerCamelCase : Dict = nn.Linear(__magic_name__ , __magic_name__ )
def UpperCamelCase__ ( self , __magic_name__ ):
lowerCamelCase : Any = self.proj(__magic_name__ )
return x * torch.sigmoid(1.702 * x )
class A__ ( nn.Module):
def __init__( self , __magic_name__ , __magic_name__ ):
super().__init__()
lowerCamelCase : Optional[int] = nn.Embedding(__magic_name__ , __magic_name__ )
lowerCamelCase : Union[str, Any] = nn.SiLU()
lowerCamelCase : List[Any] = nn.Linear(__magic_name__ , embedding_dim * 2 )
lowerCamelCase : Tuple = nn.LayerNorm(__magic_name__ , elementwise_affine=__magic_name__ )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ ):
lowerCamelCase : Union[str, Any] = self.linear(self.silu(self.emb(__magic_name__ ) ) )
lowerCamelCase , lowerCamelCase : Any = torch.chunk(__magic_name__ , 2 )
lowerCamelCase : Any = self.norm(__magic_name__ ) * (1 + scale) + shift
return x
class A__ ( nn.Module):
def __init__( self , __magic_name__ , __magic_name__ ):
super().__init__()
lowerCamelCase : Any = CombinedTimestepLabelEmbeddings(__magic_name__ , __magic_name__ )
lowerCamelCase : Any = nn.SiLU()
lowerCamelCase : Optional[int] = nn.Linear(__magic_name__ , 6 * embedding_dim , bias=__magic_name__ )
lowerCamelCase : Optional[Any] = nn.LayerNorm(__magic_name__ , elementwise_affine=__magic_name__ , eps=1e-6 )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=None ):
lowerCamelCase : str = self.linear(self.silu(self.emb(__magic_name__ , __magic_name__ , hidden_dtype=__magic_name__ ) ) )
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Optional[int] = emb.chunk(6 , dim=1 )
lowerCamelCase : Any = self.norm(__magic_name__ ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class A__ ( nn.Module):
def __init__( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = None , __magic_name__ = 1e-5 ):
super().__init__()
lowerCamelCase : Optional[Any] = num_groups
lowerCamelCase : Any = eps
if act_fn is None:
lowerCamelCase : List[str] = None
else:
lowerCamelCase : str = get_activation(__magic_name__ )
lowerCamelCase : Union[str, Any] = nn.Linear(__magic_name__ , out_dim * 2 )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ ):
if self.act:
lowerCamelCase : List[str] = self.act(__magic_name__ )
lowerCamelCase : Tuple = self.linear(__magic_name__ )
lowerCamelCase : Optional[int] = emb[:, :, None, None]
lowerCamelCase , lowerCamelCase : int = emb.chunk(2 , dim=1 )
lowerCamelCase : Tuple = F.group_norm(__magic_name__ , self.num_groups , eps=self.eps )
lowerCamelCase : int = x * (1 + scale) + shift
return x
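
# Illustrative aside: the `_chunk_size` path above is exact for position-wise modules --
# chunking the feed-forward over the sequence dimension changes memory use, not results:
#
#     ff = nn.Linear(8, 8)
#     x = torch.randn(2, 6, 8)                                    # (batch, seq, dim)
#     chunked = torch.cat([ff(s) for s in x.chunk(3, dim=1)], dim=1)
#     assert torch.allclose(ff(x), chunked, atol=1e-6)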
| 681 |
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()

# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)

# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    "vocab_size": len(tokenizer),
    "scale_attn_by_inverse_layer_idx": True,
    "reorder_and_upcast_attn": True,
}

# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)

# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)

# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
| 681 | 1 |
import argparse

import torch
from omegaconf import OmegaConf

from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel


def convert_ldm_original(checkpoint_path, config_path, output_path):
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    keys = list(state_dict.keys())

    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]

    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]

    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params

    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)

    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)

    noise_scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps,
        beta_schedule="scaled_linear",
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        clip_sample=False,
    )

    pipeline = LDMPipeline(vqvae, unet, noise_scheduler)
    pipeline.save_pretrained(output_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", type=str, required=True)
    parser.add_argument("--config_path", type=str, required=True)
    parser.add_argument("--output_path", type=str, required=True)
    args = parser.parse_args()
    convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
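

# Illustrative aside (a sketch, not used above): extracting a sub-checkpoint by key
# prefix, as done twice in `convert_ldm_original`, generalizes to a one-liner:
def extract_by_prefix(state_dict, prefix):
    return {key[len(prefix) :]: value for key, value in state_dict.items() if key.startswith(prefix)}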
| 681 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class TFBenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], eager_mode=True, multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, only_pretrain_model=True,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], eager_mode=True, multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "patrickvonplaten/t5-tiny-random"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("GPU")) == 0, "Cannot do xla on CPU.")
    def test_inference_no_configs_xla(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], use_xla=True, multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID], inference=True, save_to_csv=True, sequence_lengths=[8], batch_sizes=[1], inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"), inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"), env_info_csv_file=os.path.join(tmp_dir, "env.csv"), multi_process=False,
            )
            benchmark = TensorFlowBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory_line_by_line(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID], inference=True, sequence_lengths=[8], batch_sizes=[1], log_filename=os.path.join(tmp_dir, "log.txt"), log_print=True, trace_memory_line_by_line=True, eager_mode=True, multi_process=False,
            )
            benchmark = TensorFlowBenchmark(benchmark_args)
lowerCamelCase : Union[str, Any] = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(__magic_name__ , """log.txt""" ) ).exists() )
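# A minimal usage sketch of the benchmark API exercised by the tests above.
# It assumes `transformers` with the TensorFlow extras is installed; the model
# id and size settings are illustrative, not required values.
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments

sketch_args = TensorFlowBenchmarkArguments(
    models=["sshleifer/tiny-gpt2"],
    inference=True,
    training=False,
    sequence_lengths=[8],
    batch_sizes=[1],
    multi_process=False,
)
sketch_results = TensorFlowBenchmark(sketch_args).run()
print(sketch_results.time_inference_result)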
| 681 | 1 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
PNDMScheduler,
StableDiffusionLDMaDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class A__ ( unittest.TestCase):
_UpperCAmelCase : Dict = StableDiffusionLDMaDPipeline
_UpperCAmelCase : Tuple = TEXT_TO_IMAGE_PARAMS
_UpperCAmelCase : List[Any] = TEXT_TO_IMAGE_BATCH_PARAMS
_UpperCAmelCase : List[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
def UpperCamelCase__ ( self ):
torch.manual_seed(0 )
lowerCamelCase : Optional[int] = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=3_2 , )
lowerCamelCase : int = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=__magic_name__ , set_alpha_to_one=__magic_name__ , )
torch.manual_seed(0 )
lowerCamelCase : Dict = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=6 , out_channels=6 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
lowerCamelCase : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
lowerCamelCase : Union[str, Any] = CLIPTextModel(__magic_name__ )
lowerCamelCase : int = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowerCamelCase : List[Any] = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__=0 ):
if str(__magic_name__ ).startswith("""mps""" ):
lowerCamelCase : List[Any] = torch.manual_seed(__magic_name__ )
else:
lowerCamelCase : int = torch.Generator(device=__magic_name__ ).manual_seed(__magic_name__ )
lowerCamelCase : Optional[Any] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[int] = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase : str = self.get_dummy_components()
lowerCamelCase : Any = StableDiffusionLDMaDPipeline(**__magic_name__ )
lowerCamelCase : Tuple = ldmad_pipe.to(__magic_name__ )
ldmad_pipe.set_progress_bar_config(disable=__magic_name__ )
lowerCamelCase : List[Any] = self.get_dummy_inputs(__magic_name__ )
lowerCamelCase : Optional[int] = ldmad_pipe(**__magic_name__ )
lowerCamelCase , lowerCamelCase : Dict = output.rgb, output.depth
lowerCamelCase : Optional[Any] = rgb[0, -3:, -3:, -1]
lowerCamelCase : Optional[Any] = depth[0, -3:, -1]
assert rgb.shape == (1, 6_4, 6_4, 3)
assert depth.shape == (1, 6_4, 6_4)
lowerCamelCase : Dict = np.array(
[0.37_338_176, 0.70_247, 0.74_203_193, 0.51_643_604, 0.58_256_793, 0.60_932_136, 0.4_181_095, 0.48_355_877, 0.46_535_262] )
lowerCamelCase : Dict = np.array([103.46_727, 85.812_004, 87.849_236] )
assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb ).max() < 1e-2
assert np.abs(image_slice_depth.flatten() - expected_slice_depth ).max() < 1e-2
def UpperCamelCase__ ( self ):
lowerCamelCase : int = self.get_dummy_components()
lowerCamelCase : Optional[int] = StableDiffusionLDMaDPipeline(**__magic_name__ )
lowerCamelCase : List[str] = ldmad_pipe.to(__magic_name__ )
ldmad_pipe.set_progress_bar_config(disable=__magic_name__ )
lowerCamelCase : Optional[Any] = self.get_dummy_inputs(__magic_name__ )
lowerCamelCase : Tuple = 3 * [inputs["""prompt"""]]
# forward
lowerCamelCase : List[Any] = ldmad_pipe(**__magic_name__ )
lowerCamelCase , lowerCamelCase : List[str] = output.rgb, output.depth
lowerCamelCase : int = rgb_slice_a[0, -3:, -3:, -1]
lowerCamelCase : Optional[Any] = depth_slice_a[0, -3:, -1]
lowerCamelCase : str = self.get_dummy_inputs(__magic_name__ )
lowerCamelCase : List[Any] = 3 * [inputs.pop("""prompt""" )]
lowerCamelCase : Tuple = ldmad_pipe.tokenizer(
__magic_name__ , padding="""max_length""" , max_length=ldmad_pipe.tokenizer.model_max_length , truncation=__magic_name__ , return_tensors="""pt""" , )
lowerCamelCase : str = text_inputs["""input_ids"""].to(__magic_name__ )
lowerCamelCase : Optional[int] = ldmad_pipe.text_encoder(__magic_name__ )[0]
lowerCamelCase : List[str] = prompt_embeds
# forward
lowerCamelCase : Optional[Any] = ldmad_pipe(**__magic_name__ )
lowerCamelCase , lowerCamelCase : Dict = output.rgb, output.depth
lowerCamelCase : int = rgb_slice_b[0, -3:, -3:, -1]
lowerCamelCase : Dict = depth_slice_b[0, -3:, -1]
assert np.abs(rgb_slice_a.flatten() - rgb_slice_b.flatten() ).max() < 1e-4
assert np.abs(depth_slice_a.flatten() - depth_slice_b.flatten() ).max() < 1e-4
def UpperCamelCase__ ( self ):
lowerCamelCase : Any = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase : Optional[int] = self.get_dummy_components()
lowerCamelCase : str = PNDMScheduler(skip_prk_steps=__magic_name__ )
lowerCamelCase : Optional[int] = StableDiffusionLDMaDPipeline(**__magic_name__ )
lowerCamelCase : List[Any] = ldmad_pipe.to(__magic_name__ )
ldmad_pipe.set_progress_bar_config(disable=__magic_name__ )
lowerCamelCase : List[str] = self.get_dummy_inputs(__magic_name__ )
lowerCamelCase : Tuple = """french fries"""
lowerCamelCase : Any = ldmad_pipe(**__magic_name__ , negative_prompt=__magic_name__ )
lowerCamelCase , lowerCamelCase : Dict = output.rgb, output.depth
lowerCamelCase : Optional[Any] = rgb[0, -3:, -3:, -1]
lowerCamelCase : Tuple = depth[0, -3:, -1]
assert rgb.shape == (1, 6_4, 6_4, 3)
assert depth.shape == (1, 6_4, 6_4)
lowerCamelCase : List[str] = np.array(
[0.37_044, 0.71_811_503, 0.7_223_251, 0.48_603_675, 0.5_638_391, 0.6_364_948, 0.42_833_704, 0.4_901_315, 0.47_926_217] )
lowerCamelCase : Dict = np.array([107.84_738, 84.62_802, 89.962_135] )
assert np.abs(rgb_slice.flatten() - expected_slice_rgb ).max() < 1e-2
assert np.abs(depth_slice.flatten() - expected_slice_depth ).max() < 1e-2
@slow
@require_torch_gpu
class A__ ( unittest.TestCase):
def UpperCamelCase__ ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__="cpu" , __magic_name__=torch.floataa , __magic_name__=0 ):
lowerCamelCase : int = torch.Generator(device=__magic_name__ ).manual_seed(__magic_name__ )
lowerCamelCase : Optional[Any] = np.random.RandomState(__magic_name__ ).standard_normal((1, 4, 6_4, 6_4) )
lowerCamelCase : Any = torch.from_numpy(__magic_name__ ).to(device=__magic_name__ , dtype=__magic_name__ )
lowerCamelCase : Optional[int] = {
"""prompt""": """a photograph of an astronaut riding a horse""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def UpperCamelCase__ ( self ):
lowerCamelCase : Tuple = StableDiffusionLDMaDPipeline.from_pretrained("""Intel/ldm3d""" )
lowerCamelCase : str = ldmad_pipe.to(__magic_name__ )
ldmad_pipe.set_progress_bar_config(disable=__magic_name__ )
lowerCamelCase : Any = self.get_inputs(__magic_name__ )
lowerCamelCase : Any = ldmad_pipe(**__magic_name__ )
lowerCamelCase , lowerCamelCase : Union[str, Any] = output.rgb, output.depth
lowerCamelCase : List[str] = rgb[0, -3:, -3:, -1].flatten()
lowerCamelCase : Optional[int] = depth[0, -3:, -1].flatten()
assert rgb.shape == (1, 5_1_2, 5_1_2, 3)
assert depth.shape == (1, 5_1_2, 5_1_2)
lowerCamelCase : List[Any] = np.array(
[0.53_805_465, 0.56_707_305, 0.5_486_515, 0.57_012_236, 0.5_814_511, 0.56_253_487, 0.54_843_014, 0.55_092_263, 0.6_459_706] )
lowerCamelCase : int = np.array(
[0.9_263_781, 0.6_678_672, 0.5_486_515, 0.92_202_145, 0.67_831_135, 0.56_253_487, 0.9_241_694, 0.7_551_478, 0.6_459_706] )
assert np.abs(rgb_slice - expected_slice_rgb ).max() < 3e-3
assert np.abs(depth_slice - expected_slice_depth ).max() < 3e-3
@nightly
@require_torch_gpu
class A__ ( unittest.TestCase):
def UpperCamelCase__ ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__="cpu" , __magic_name__=torch.floataa , __magic_name__=0 ):
lowerCamelCase : Dict = torch.Generator(device=__magic_name__ ).manual_seed(__magic_name__ )
lowerCamelCase : List[str] = np.random.RandomState(__magic_name__ ).standard_normal((1, 4, 6_4, 6_4) )
lowerCamelCase : Any = torch.from_numpy(__magic_name__ ).to(device=__magic_name__ , dtype=__magic_name__ )
lowerCamelCase : Tuple = {
"""prompt""": """a photograph of an astronaut riding a horse""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 5_0,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def UpperCamelCase__ ( self ):
lowerCamelCase : Dict = StableDiffusionLDMaDPipeline.from_pretrained("""Intel/ldm3d""" ).to(__magic_name__ )
ldmad_pipe.set_progress_bar_config(disable=__magic_name__ )
lowerCamelCase : List[str] = self.get_inputs(__magic_name__ )
lowerCamelCase : Tuple = ldmad_pipe(**__magic_name__ )
lowerCamelCase , lowerCamelCase : Optional[int] = output.rgb, output.depth
lowerCamelCase : Optional[Any] = 0.495_586
lowerCamelCase : List[Any] = 0.33_795_515
lowerCamelCase : str = 112.48_518
lowerCamelCase : str = 98.489_746
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3
assert np.abs(expected_depth_std - depth.std() ) < 1e-3
def UpperCamelCase__ ( self ):
lowerCamelCase : Dict = StableDiffusionLDMaDPipeline.from_pretrained("""Intel/ldm3d-4c""" ).to(__magic_name__ )
ldmad_pipe.set_progress_bar_config(disable=__magic_name__ )
lowerCamelCase : Dict = self.get_inputs(__magic_name__ )
lowerCamelCase : List[str] = ldmad_pipe(**__magic_name__ )
lowerCamelCase , lowerCamelCase : str = output.rgb, output.depth
lowerCamelCase : int = 0.4_194_127
lowerCamelCase : int = 0.35_375_586
lowerCamelCase : List[str] = 0.5_638_502
lowerCamelCase : Tuple = 0.34_686_103
assert rgb.shape == (1, 5_1_2, 5_1_2, 3)
assert depth.shape == (1, 5_1_2, 5_1_2, 1)
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3
assert np.abs(expected_depth_std - depth.std() ) < 1e-3
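# A self-contained sketch of the slice-comparison pattern these tests rely on:
# pin a tiny corner of a large output array and assert it stays within a
# tolerance of a recorded reference. The arrays below are made up for
# illustration.
import numpy as np

rgb_out = np.full((1, 64, 64, 3), 0.5, dtype=np.float32)
rgb_corner = rgb_out[0, -3:, -3:, -1].flatten()      # 9 values from one corner
expected_corner = np.full(9, 0.5, dtype=np.float32)
assert np.abs(rgb_corner - expected_corner).max() < 1e-2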
| 681 |
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def _a ( lowerCamelCase ):
return x + 2
class A__ ( unittest.TestCase):
def UpperCamelCase__ ( self ):
lowerCamelCase : List[Any] = """x = 3"""
lowerCamelCase : Tuple = {}
lowerCamelCase : List[str] = evaluate(__magic_name__ , {} , state=__magic_name__ )
assert result == 3
self.assertDictEqual(__magic_name__ , {"""x""": 3} )
lowerCamelCase : Optional[int] = """x = y"""
lowerCamelCase : Tuple = {"""y""": 5}
lowerCamelCase : Tuple = evaluate(__magic_name__ , {} , state=__magic_name__ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(__magic_name__ , {"""x""": 5, """y""": 5} )
def UpperCamelCase__ ( self ):
lowerCamelCase : List[str] = """y = add_two(x)"""
lowerCamelCase : List[Any] = {"""x""": 3}
lowerCamelCase : Union[str, Any] = evaluate(__magic_name__ , {"""add_two""": add_two} , state=__magic_name__ )
assert result == 5
self.assertDictEqual(__magic_name__ , {"""x""": 3, """y""": 5} )
# Won't work without the tool
with CaptureStdout() as out:
lowerCamelCase : Union[str, Any] = evaluate(__magic_name__ , {} , state=__magic_name__ )
assert result is None
assert "tried to execute add_two" in out.out
def UpperCamelCase__ ( self ):
lowerCamelCase : int = """x = 3"""
lowerCamelCase : Dict = {}
lowerCamelCase : Tuple = evaluate(__magic_name__ , {} , state=__magic_name__ )
assert result == 3
self.assertDictEqual(__magic_name__ , {"""x""": 3} )
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[Any] = """test_dict = {'x': x, 'y': add_two(x)}"""
lowerCamelCase : Optional[int] = {"""x""": 3}
lowerCamelCase : Tuple = evaluate(__magic_name__ , {"""add_two""": add_two} , state=__magic_name__ )
self.assertDictEqual(__magic_name__ , {"""x""": 3, """y""": 5} )
self.assertDictEqual(__magic_name__ , {"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}} )
def UpperCamelCase__ ( self ):
lowerCamelCase : Tuple = """x = 3\ny = 5"""
lowerCamelCase : Optional[int] = {}
lowerCamelCase : Union[str, Any] = evaluate(__magic_name__ , {} , state=__magic_name__ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(__magic_name__ , {"""x""": 3, """y""": 5} )
def UpperCamelCase__ ( self ):
lowerCamelCase : Tuple = """text = f'This is x: {x}.'"""
lowerCamelCase : Optional[int] = {"""x""": 3}
lowerCamelCase : Optional[int] = evaluate(__magic_name__ , {} , state=__magic_name__ )
# evaluate returns the value of the last assignment.
assert result == "This is x: 3."
self.assertDictEqual(__magic_name__ , {"""x""": 3, """text""": """This is x: 3."""} )
def UpperCamelCase__ ( self ):
lowerCamelCase : Tuple = """if x <= 3:\n y = 2\nelse:\n y = 5"""
lowerCamelCase : Tuple = {"""x""": 3}
lowerCamelCase : int = evaluate(__magic_name__ , {} , state=__magic_name__ )
# evaluate returns the value of the last assignment.
assert result == 2
self.assertDictEqual(__magic_name__ , {"""x""": 3, """y""": 2} )
lowerCamelCase : Tuple = {"""x""": 8}
lowerCamelCase : Dict = evaluate(__magic_name__ , {} , state=__magic_name__ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(__magic_name__ , {"""x""": 8, """y""": 5} )
def UpperCamelCase__ ( self ):
lowerCamelCase : Dict = """test_list = [x, add_two(x)]"""
lowerCamelCase : List[Any] = {"""x""": 3}
lowerCamelCase : List[str] = evaluate(__magic_name__ , {"""add_two""": add_two} , state=__magic_name__ )
self.assertListEqual(__magic_name__ , [3, 5] )
self.assertDictEqual(__magic_name__ , {"""x""": 3, """test_list""": [3, 5]} )
def UpperCamelCase__ ( self ):
lowerCamelCase : str = """y = x"""
lowerCamelCase : List[Any] = {"""x""": 3}
lowerCamelCase : Any = evaluate(__magic_name__ , {} , state=__magic_name__ )
assert result == 3
self.assertDictEqual(__magic_name__ , {"""x""": 3, """y""": 3} )
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[int] = """test_list = [x, add_two(x)]\ntest_list[1]"""
lowerCamelCase : Any = {"""x""": 3}
lowerCamelCase : List[str] = evaluate(__magic_name__ , {"""add_two""": add_two} , state=__magic_name__ )
assert result == 5
self.assertDictEqual(__magic_name__ , {"""x""": 3, """test_list""": [3, 5]} )
lowerCamelCase : Any = """test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"""
lowerCamelCase : Dict = {"""x""": 3}
lowerCamelCase : Any = evaluate(__magic_name__ , {"""add_two""": add_two} , state=__magic_name__ )
assert result == 5
self.assertDictEqual(__magic_name__ , {"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}} )
def UpperCamelCase__ ( self ):
lowerCamelCase : Union[str, Any] = """x = 0\nfor i in range(3):\n x = i"""
lowerCamelCase : int = {}
lowerCamelCase : Union[str, Any] = evaluate(__magic_name__ , {"""range""": range} , state=__magic_name__ )
assert result == 2
self.assertDictEqual(__magic_name__ , {"""x""": 2, """i""": 2} )
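# A minimal, self-contained sketch of the idea behind `evaluate`: walk the AST,
# keep variables in an explicit `state` dict, and only allow calls to
# whitelisted tools. This is a toy, not the transformers interpreter.
import ast

def tiny_evaluate(code, tools, state):
    result = None
    for node in ast.parse(code).body:
        if isinstance(node, ast.Assign):
            result = _eval_node(node.value, tools, state)
            for target in node.targets:
                state[target.id] = result
        elif isinstance(node, ast.Expr):
            result = _eval_node(node.value, tools, state)
    return result  # value of the last assignment/expression

def _eval_node(node, tools, state):
    if isinstance(node, ast.Constant):
        return node.value
    if isinstance(node, ast.Name):
        return state[node.id] if node.id in state else tools[node.id]
    if isinstance(node, ast.Call):
        func = tools[node.func.id]  # KeyError here means "tool not allowed"
        return func(*[_eval_node(arg, tools, state) for arg in node.args])
    raise ValueError(f"unsupported syntax: {type(node).__name__}")

toy_state = {}
print(tiny_evaluate("x = 3", {}, toy_state))                                     # 3
print(tiny_evaluate("y = add_two(x)", {"add_two": lambda v: v + 2}, toy_state))  # 5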
| 681 | 1 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"""facebook/wav2vec2-base-960h""": """https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json""",
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Any = """wav2vec2"""
def __init__( self , __magic_name__=3_2 , __magic_name__=7_6_8 , __magic_name__=1_2 , __magic_name__=1_2 , __magic_name__=3_0_7_2 , __magic_name__="gelu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=0.0 , __magic_name__=0.0 , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=0.02 , __magic_name__=1e-5 , __magic_name__="group" , __magic_name__="gelu" , __magic_name__=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , __magic_name__=(5, 2, 2, 2, 2, 2, 2) , __magic_name__=(1_0, 3, 3, 3, 3, 2, 2) , __magic_name__=False , __magic_name__=1_2_8 , __magic_name__=1_6 , __magic_name__=False , __magic_name__=True , __magic_name__=0.05 , __magic_name__=1_0 , __magic_name__=2 , __magic_name__=0.0 , __magic_name__=1_0 , __magic_name__=0 , __magic_name__=3_2_0 , __magic_name__=2 , __magic_name__=0.1 , __magic_name__=1_0_0 , __magic_name__=2_5_6 , __magic_name__=2_5_6 , __magic_name__=0.1 , __magic_name__="sum" , __magic_name__=False , __magic_name__=False , __magic_name__=2_5_6 , __magic_name__=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , __magic_name__=(5, 3, 3, 1, 1) , __magic_name__=(1, 2, 3, 1, 1) , __magic_name__=5_1_2 , __magic_name__=0 , __magic_name__=1 , __magic_name__=2 , __magic_name__=False , __magic_name__=3 , __magic_name__=2 , __magic_name__=3 , __magic_name__=None , __magic_name__=None , **__magic_name__ , ):
super().__init__(**__magic_name__ , pad_token_id=__magic_name__ , bos_token_id=__magic_name__ , eos_token_id=__magic_name__ )
lowerCamelCase : Union[str, Any] = hidden_size
lowerCamelCase : int = feat_extract_norm
lowerCamelCase : Optional[Any] = feat_extract_activation
lowerCamelCase : List[str] = list(__magic_name__ )
lowerCamelCase : Optional[int] = list(__magic_name__ )
lowerCamelCase : Union[str, Any] = list(__magic_name__ )
lowerCamelCase : Optional[Any] = conv_bias
lowerCamelCase : Optional[int] = num_conv_pos_embeddings
lowerCamelCase : Tuple = num_conv_pos_embedding_groups
lowerCamelCase : str = len(self.conv_dim )
lowerCamelCase : List[str] = num_hidden_layers
lowerCamelCase : List[Any] = intermediate_size
lowerCamelCase : Union[str, Any] = hidden_act
lowerCamelCase : Optional[int] = num_attention_heads
lowerCamelCase : str = hidden_dropout
lowerCamelCase : Tuple = attention_dropout
lowerCamelCase : int = activation_dropout
lowerCamelCase : Optional[Any] = feat_proj_dropout
lowerCamelCase : Optional[Any] = final_dropout
lowerCamelCase : Optional[int] = layerdrop
lowerCamelCase : List[str] = layer_norm_eps
lowerCamelCase : Optional[Any] = initializer_range
lowerCamelCase : List[str] = vocab_size
lowerCamelCase : List[Any] = do_stable_layer_norm
lowerCamelCase : List[str] = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowerCamelCase : Dict = apply_spec_augment
lowerCamelCase : Optional[int] = mask_time_prob
lowerCamelCase : Dict = mask_time_length
lowerCamelCase : Optional[int] = mask_time_min_masks
lowerCamelCase : List[str] = mask_feature_prob
lowerCamelCase : int = mask_feature_length
lowerCamelCase : Dict = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
lowerCamelCase : int = num_codevectors_per_group
lowerCamelCase : int = num_codevector_groups
lowerCamelCase : int = contrastive_logits_temperature
lowerCamelCase : List[str] = feat_quantizer_dropout
lowerCamelCase : int = num_negatives
lowerCamelCase : Dict = codevector_dim
lowerCamelCase : Optional[Any] = proj_codevector_dim
lowerCamelCase : Optional[Any] = diversity_loss_weight
# ctc loss
lowerCamelCase : int = ctc_loss_reduction
lowerCamelCase : Optional[int] = ctc_zero_infinity
# adapter
lowerCamelCase : Any = add_adapter
lowerCamelCase : str = adapter_kernel_size
lowerCamelCase : Any = adapter_stride
lowerCamelCase : Union[str, Any] = num_adapter_layers
lowerCamelCase : Optional[int] = output_hidden_size or hidden_size
lowerCamelCase : Optional[int] = adapter_attn_dim
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
lowerCamelCase : Union[str, Any] = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
lowerCamelCase : int = list(__magic_name__ )
lowerCamelCase : Any = list(__magic_name__ )
lowerCamelCase : Tuple = list(__magic_name__ )
lowerCamelCase : int = xvector_output_dim
@property
def UpperCamelCase__ ( self ):
return functools.reduce(operator.mul , self.conv_stride , 1 )
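# The property above multiplies the feature-extractor strides together (in
# transformers this is `inputs_to_logits_ratio`); this standalone sketch shows
# the arithmetic with the default wav2vec2 strides from the signature above.
from functools import reduce
import operator

conv_stride = (5, 2, 2, 2, 2, 2, 2)
samples_per_frame = reduce(operator.mul, conv_stride, 1)
print(samples_per_frame)            # 320 raw samples per encoder frame
print(16_000 / samples_per_frame)   # 50.0 frames per second for 16 kHz audio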
| 681 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"""edbeeching/decision-transformer-gym-hopper-medium""": (
"""https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"""
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Optional[int] = """decision_transformer"""
_UpperCAmelCase : str = ["""past_key_values"""]
_UpperCAmelCase : Any = {
"""max_position_embeddings""": """n_positions""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self , __magic_name__=1_7 , __magic_name__=4 , __magic_name__=1_2_8 , __magic_name__=4_0_9_6 , __magic_name__=True , __magic_name__=1 , __magic_name__=1_0_2_4 , __magic_name__=3 , __magic_name__=1 , __magic_name__=None , __magic_name__="relu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=1e-5 , __magic_name__=0.02 , __magic_name__=True , __magic_name__=True , __magic_name__=5_0_2_5_6 , __magic_name__=5_0_2_5_6 , __magic_name__=False , __magic_name__=False , **__magic_name__ , ):
lowerCamelCase : Optional[int] = state_dim
lowerCamelCase : int = act_dim
lowerCamelCase : int = hidden_size
lowerCamelCase : Union[str, Any] = max_ep_len
lowerCamelCase : Optional[int] = action_tanh
lowerCamelCase : Any = vocab_size
lowerCamelCase : List[str] = n_positions
lowerCamelCase : List[Any] = n_layer
lowerCamelCase : Dict = n_head
lowerCamelCase : Optional[Any] = n_inner
lowerCamelCase : Tuple = activation_function
lowerCamelCase : Tuple = resid_pdrop
lowerCamelCase : str = embd_pdrop
lowerCamelCase : Dict = attn_pdrop
lowerCamelCase : Tuple = layer_norm_epsilon
lowerCamelCase : Tuple = initializer_range
lowerCamelCase : Tuple = scale_attn_weights
lowerCamelCase : str = use_cache
lowerCamelCase : List[Any] = scale_attn_by_inverse_layer_idx
lowerCamelCase : List[str] = reorder_and_upcast_attn
lowerCamelCase : Optional[Any] = bos_token_id
lowerCamelCase : str = eos_token_id
super().__init__(bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ )
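# A toy sketch of the `attribute_map` aliasing declared above: canonical names
# like `num_hidden_layers` resolve to model-specific fields like `n_layer`.
# This is a minimal reimplementation for illustration, not the actual
# PretrainedConfig machinery.
class ToyConfig:
    attribute_map = {"num_hidden_layers": "n_layer"}

    def __init__(self, n_layer=3):
        self.n_layer = n_layer

    def __getattr__(self, name):  # only called when normal lookup fails
        if name in self.attribute_map:
            return getattr(self, self.attribute_map[name])
        raise AttributeError(name)

print(ToyConfig(n_layer=6).num_hidden_layers)  # 6, resolved via the alias map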
| 681 | 1 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class A__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase):
_UpperCAmelCase : Any = StableDiffusionPanoramaPipeline
_UpperCAmelCase : Dict = TEXT_TO_IMAGE_PARAMS
_UpperCAmelCase : Any = TEXT_TO_IMAGE_BATCH_PARAMS
_UpperCAmelCase : List[str] = TEXT_TO_IMAGE_IMAGE_PARAMS
_UpperCAmelCase : Tuple = TEXT_TO_IMAGE_IMAGE_PARAMS
def UpperCamelCase__ ( self ):
torch.manual_seed(0 )
lowerCamelCase : List[str] = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=1 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=3_2 , )
lowerCamelCase : Dict = DDIMScheduler()
torch.manual_seed(0 )
lowerCamelCase : str = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
lowerCamelCase : str = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
lowerCamelCase : Tuple = CLIPTextModel(__magic_name__ )
lowerCamelCase : Any = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowerCamelCase : List[Any] = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__=0 ):
lowerCamelCase : int = torch.manual_seed(__magic_name__ )
lowerCamelCase : int = {
"""prompt""": """a photo of the dolomites""",
"""generator""": generator,
# Setting height and width to None to prevent OOMs on CPU.
"""height""": None,
"""width""": None,
"""num_inference_steps""": 1,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def UpperCamelCase__ ( self ):
lowerCamelCase : List[str] = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase : int = self.get_dummy_components()
lowerCamelCase : Optional[int] = StableDiffusionPanoramaPipeline(**__magic_name__ )
lowerCamelCase : Optional[Any] = sd_pipe.to(__magic_name__ )
sd_pipe.set_progress_bar_config(disable=__magic_name__ )
lowerCamelCase : int = self.get_dummy_inputs(__magic_name__ )
lowerCamelCase : Dict = sd_pipe(**__magic_name__ ).images
lowerCamelCase : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
lowerCamelCase : Union[str, Any] = np.array([0.6_186, 0.5_374, 0.4_915, 0.4_135, 0.4_114, 0.4_563, 0.5_128, 0.4_977, 0.4_757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCamelCase__ ( self ):
super().test_inference_batch_consistent(batch_sizes=[1, 2] )
def UpperCamelCase__ ( self ):
super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.25e-3 )
def UpperCamelCase__ ( self ):
lowerCamelCase : List[Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase : List[Any] = self.get_dummy_components()
lowerCamelCase : str = StableDiffusionPanoramaPipeline(**__magic_name__ )
lowerCamelCase : Optional[Any] = sd_pipe.to(__magic_name__ )
sd_pipe.set_progress_bar_config(disable=__magic_name__ )
lowerCamelCase : str = self.get_dummy_inputs(__magic_name__ )
lowerCamelCase : str = """french fries"""
lowerCamelCase : int = sd_pipe(**__magic_name__ , negative_prompt=__magic_name__ )
lowerCamelCase : List[Any] = output.images
lowerCamelCase : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
lowerCamelCase : Dict = np.array([0.6_187, 0.5_375, 0.4_915, 0.4_136, 0.4_114, 0.4_563, 0.5_128, 0.4_976, 0.4_757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCamelCase__ ( self ):
lowerCamelCase : List[Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase : Optional[int] = self.get_dummy_components()
lowerCamelCase : List[str] = StableDiffusionPanoramaPipeline(**__magic_name__ )
lowerCamelCase : int = sd_pipe.to(__magic_name__ )
sd_pipe.set_progress_bar_config(disable=__magic_name__ )
lowerCamelCase : int = self.get_dummy_inputs(__magic_name__ )
lowerCamelCase : Optional[int] = sd_pipe(**__magic_name__ , view_batch_size=2 )
lowerCamelCase : Optional[Any] = output.images
lowerCamelCase : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
lowerCamelCase : Optional[Any] = np.array([0.6_187, 0.5_375, 0.4_915, 0.4_136, 0.4_114, 0.4_563, 0.5_128, 0.4_976, 0.4_757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCamelCase__ ( self ):
lowerCamelCase : Tuple = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase : Union[str, Any] = self.get_dummy_components()
lowerCamelCase : Tuple = EulerAncestralDiscreteScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" )
lowerCamelCase : Optional[Any] = StableDiffusionPanoramaPipeline(**__magic_name__ )
lowerCamelCase : List[str] = sd_pipe.to(__magic_name__ )
sd_pipe.set_progress_bar_config(disable=__magic_name__ )
lowerCamelCase : List[Any] = self.get_dummy_inputs(__magic_name__ )
lowerCamelCase : Optional[int] = sd_pipe(**__magic_name__ ).images
lowerCamelCase : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
lowerCamelCase : List[str] = np.array([0.4_024, 0.6_510, 0.4_901, 0.5_378, 0.5_813, 0.5_622, 0.4_795, 0.4_467, 0.4_952] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCamelCase__ ( self ):
lowerCamelCase : str = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase : Optional[int] = self.get_dummy_components()
lowerCamelCase : List[Any] = PNDMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , skip_prk_steps=__magic_name__ )
lowerCamelCase : Optional[Any] = StableDiffusionPanoramaPipeline(**__magic_name__ )
lowerCamelCase : Optional[int] = sd_pipe.to(__magic_name__ )
sd_pipe.set_progress_bar_config(disable=__magic_name__ )
lowerCamelCase : Dict = self.get_dummy_inputs(__magic_name__ )
lowerCamelCase : List[str] = sd_pipe(**__magic_name__ ).images
lowerCamelCase : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
lowerCamelCase : List[str] = np.array([0.6_391, 0.6_291, 0.4_861, 0.5_134, 0.5_552, 0.4_578, 0.5_032, 0.5_023, 0.4_539] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class A__ ( unittest.TestCase):
def UpperCamelCase__ ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self , __magic_name__=0 ):
lowerCamelCase : Union[str, Any] = torch.manual_seed(__magic_name__ )
lowerCamelCase : List[str] = {
"""prompt""": """a photo of the dolomites""",
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def UpperCamelCase__ ( self ):
lowerCamelCase : Tuple = """stabilityai/stable-diffusion-2-base"""
lowerCamelCase : Optional[int] = DDIMScheduler.from_pretrained(__magic_name__ , subfolder="""scheduler""" )
lowerCamelCase : Union[str, Any] = StableDiffusionPanoramaPipeline.from_pretrained(__magic_name__ , scheduler=__magic_name__ , safety_checker=__magic_name__ )
pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
pipe.enable_attention_slicing()
lowerCamelCase : Any = self.get_inputs()
lowerCamelCase : Optional[int] = pipe(**__magic_name__ ).images
lowerCamelCase : str = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 2_0_4_8, 3)
lowerCamelCase : Optional[int] = np.array(
[
0.36_968_392,
0.27_025_372,
0.32_446_766,
0.28_379_387,
0.36_363_274,
0.30_733_347,
0.27_100_027,
0.27_054_125,
0.25_536_096,
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-2
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[Any] = StableDiffusionPanoramaPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2-base""" , safety_checker=__magic_name__ )
lowerCamelCase : int = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
pipe.enable_attention_slicing()
lowerCamelCase : Optional[Any] = self.get_inputs()
lowerCamelCase : List[str] = pipe(**__magic_name__ ).images
lowerCamelCase : str = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 2_0_4_8, 3)
lowerCamelCase : Dict = np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def UpperCamelCase__ ( self ):
lowerCamelCase : List[str] = 0
def callback_fn(__magic_name__ , __magic_name__ , __magic_name__ ) -> None:
lowerCamelCase : Union[str, Any] = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
lowerCamelCase : Any = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 6_4, 2_5_6)
lowerCamelCase : str = latents[0, -3:, -3:, -1]
lowerCamelCase : List[str] = np.array(
[
0.18_681_869,
0.33_907_816,
0.5_361_276,
0.14_432_865,
-0.02_856_611,
-0.73_941_123,
0.23_397_987,
0.47_322_682,
-0.37_823_164,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
lowerCamelCase : str = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 6_4, 2_5_6)
lowerCamelCase : List[str] = latents[0, -3:, -3:, -1]
lowerCamelCase : Tuple = np.array(
[
0.18_539_645,
0.33_987_248,
0.5_378_559,
0.14_437_142,
-0.02_455_261,
-0.7_338_317,
0.23_990_755,
0.47_356_272,
-0.3_786_505,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
lowerCamelCase : Optional[int] = False
lowerCamelCase : Tuple = """stabilityai/stable-diffusion-2-base"""
lowerCamelCase : Dict = DDIMScheduler.from_pretrained(__magic_name__ , subfolder="""scheduler""" )
lowerCamelCase : List[str] = StableDiffusionPanoramaPipeline.from_pretrained(__magic_name__ , scheduler=__magic_name__ , safety_checker=__magic_name__ )
lowerCamelCase : Any = pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
pipe.enable_attention_slicing()
lowerCamelCase : Optional[Any] = self.get_inputs()
pipe(**__magic_name__ , callback=__magic_name__ , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def UpperCamelCase__ ( self ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowerCamelCase : Optional[Any] = """stabilityai/stable-diffusion-2-base"""
lowerCamelCase : Any = DDIMScheduler.from_pretrained(__magic_name__ , subfolder="""scheduler""" )
lowerCamelCase : List[str] = StableDiffusionPanoramaPipeline.from_pretrained(__magic_name__ , scheduler=__magic_name__ , safety_checker=__magic_name__ )
lowerCamelCase : int = pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowerCamelCase : Any = self.get_inputs()
lowerCamelCase : Any = pipe(**__magic_name__ )
lowerCamelCase : Optional[Any] = torch.cuda.max_memory_allocated()
# make sure that less than 5.5 GB is allocated
assert mem_bytes < 5.5 * 1_0**9
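# A short sketch of the peak-memory measurement pattern used in the last test.
# It needs a CUDA-capable torch build; the matmul stands in for running the
# actual pipeline.
import torch

if torch.cuda.is_available():
    torch.cuda.empty_cache()
    torch.cuda.reset_peak_memory_stats()
    x = torch.randn(1024, 1024, device="cuda")
    y = x @ x
    peak_bytes = torch.cuda.max_memory_allocated()
    print(f"peak allocation: {peak_bytes / 2**30:.3f} GiB")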
| 681 |
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
_lowerCamelCase =logging.get_logger(__name__)
class A__ :
def __init__( self , __magic_name__ , __magic_name__ ):
lowerCamelCase : Any = question_encoder
lowerCamelCase : Dict = generator
lowerCamelCase : Tuple = self.question_encoder
def UpperCamelCase__ ( self , __magic_name__ ):
if os.path.isfile(__magic_name__ ):
raise ValueError(F'''Provided path ({save_directory}) should be a directory, not a file''' )
os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
lowerCamelCase : Any = os.path.join(__magic_name__ , """question_encoder_tokenizer""" )
lowerCamelCase : str = os.path.join(__magic_name__ , """generator_tokenizer""" )
self.question_encoder.save_pretrained(__magic_name__ )
self.generator.save_pretrained(__magic_name__ )
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ , **__magic_name__ ):
# dynamically import AutoTokenizer
from ..auto.tokenization_auto import AutoTokenizer
lowerCamelCase : Any = kwargs.pop("""config""" , __magic_name__ )
if config is None:
lowerCamelCase : Tuple = RagConfig.from_pretrained(__magic_name__ )
lowerCamelCase : Optional[Any] = AutoTokenizer.from_pretrained(
__magic_name__ , config=config.question_encoder , subfolder="""question_encoder_tokenizer""" )
lowerCamelCase : Any = AutoTokenizer.from_pretrained(
__magic_name__ , config=config.generator , subfolder="""generator_tokenizer""" )
return cls(question_encoder=__magic_name__ , generator=__magic_name__ )
def __call__( self , *__magic_name__ , **__magic_name__ ):
return self.current_tokenizer(*__magic_name__ , **__magic_name__ )
def UpperCamelCase__ ( self , *__magic_name__ , **__magic_name__ ):
return self.generator.batch_decode(*__magic_name__ , **__magic_name__ )
def UpperCamelCase__ ( self , *__magic_name__ , **__magic_name__ ):
return self.generator.decode(*__magic_name__ , **__magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : Union[str, Any] = self.question_encoder
def UpperCamelCase__ ( self ):
lowerCamelCase : str = self.generator
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = "longest" , __magic_name__ = None , __magic_name__ = True , **__magic_name__ , ):
warnings.warn(
"""`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the """
"""regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` """
"""context manager to prepare your targets. See the documentation of your specific tokenizer for more """
"""details""" , __magic_name__ , )
if max_length is None:
lowerCamelCase : int = self.current_tokenizer.model_max_length
lowerCamelCase : int = self(
__magic_name__ , add_special_tokens=__magic_name__ , return_tensors=__magic_name__ , max_length=__magic_name__ , padding=__magic_name__ , truncation=__magic_name__ , **__magic_name__ , )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
lowerCamelCase : int = self.current_tokenizer.model_max_length
lowerCamelCase : Dict = self(
text_target=__magic_name__ , add_special_tokens=__magic_name__ , return_tensors=__magic_name__ , padding=__magic_name__ , max_length=__magic_name__ , truncation=__magic_name__ , **__magic_name__ , )
lowerCamelCase : List[Any] = labels["""input_ids"""]
return model_inputs
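# Sketch of the on-disk layout this class produces and consumes; the directory
# name below is hypothetical. Each sub-tokenizer lives in its own subfolder,
# and AutoTokenizer's `subfolder` argument points back at it on reload:
#
#   rag_tok/question_encoder_tokenizer/   <- question encoder files
#   rag_tok/generator_tokenizer/          <- generator files
#
# from transformers import AutoTokenizer
# question_encoder = AutoTokenizer.from_pretrained("rag_tok", subfolder="question_encoder_tokenizer")
# generator = AutoTokenizer.from_pretrained("rag_tok", subfolder="generator_tokenizer")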
| 681 | 1 |
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_lowerCamelCase =object()
# For specifying empty leaf dict `{}`
_lowerCamelCase =object()
def _a ( lowerCamelCase, lowerCamelCase ):
lowerCamelCase : Optional[Any] = tuple((re.compile(x + """$""" ) for x in qs) )
for i in range(len(lowerCamelCase ) - len(lowerCamelCase ) + 1 ):
lowerCamelCase : Dict = [x.match(lowerCamelCase ) for x, y in zip(lowerCamelCase, ks[i:] )]
if matches and all(lowerCamelCase ):
return True
return False
def _a ( lowerCamelCase ):
def replace(lowerCamelCase, lowerCamelCase ):
for rule, replacement in rules:
if _match(lowerCamelCase, lowerCamelCase ):
return replacement
return val
return replace
def _a ( ):
return [
# embeddings
(("transformer", "wpe", "embedding"), P("""mp""", lowerCamelCase )),
(("transformer", "wte", "embedding"), P("""mp""", lowerCamelCase )),
# attention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(lowerCamelCase, """mp""" )),
(("attention", "out_proj", "kernel"), P("""mp""", lowerCamelCase )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(lowerCamelCase, """mp""" )),
(("mlp", "c_fc", "bias"), P("""mp""" )),
(("mlp", "c_proj", "kernel"), P("""mp""", lowerCamelCase )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
def _a ( lowerCamelCase ):
lowerCamelCase : Optional[int] = _get_partition_rules()
lowerCamelCase : Tuple = _replacement_rules(lowerCamelCase )
lowerCamelCase : List[str] = {k: _unmatched for k in flatten_dict(lowerCamelCase )}
lowerCamelCase : List[str] = {k: replace(lowerCamelCase, lowerCamelCase ) for k, v in initd.items()}
assert _unmatched not in result.values(), "Incomplete partition spec."
return freeze(unflatten_dict(lowerCamelCase ) )
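# A standalone sketch of the sliding-window regex match implemented by the
# helper above: a tuple of patterns matches if it lines up with any contiguous
# window of the flattened parameter key.
import re

def window_match(patterns, key):
    compiled = tuple(re.compile(p + "$") for p in patterns)
    for i in range(len(key) - len(compiled) + 1):
        window = [q.match(k) for q, k in zip(compiled, key[i:])]
        if window and all(window):
            return True
    return False

print(window_match(("attention", "out_proj", "kernel"),
                   ("transformer", "h", "0", "attention", "out_proj", "kernel")))  # True
print(window_match(("mlp", "c_fc", "bias"),
                   ("transformer", "ln_f", "scale")))  # False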
| 681 |
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def _a ( lowerCamelCase, lowerCamelCase ):
lowerCamelCase : List[Any] = F'''{sampling_rate}'''
lowerCamelCase : Optional[int] = """1"""
lowerCamelCase : Any = """f32le"""
lowerCamelCase : Any = [
"""ffmpeg""",
"""-i""",
"""pipe:0""",
"""-ac""",
ac,
"""-ar""",
ar,
"""-f""",
format_for_conversion,
"""-hide_banner""",
"""-loglevel""",
"""quiet""",
"""pipe:1""",
]
try:
with subprocess.Popen(lowerCamelCase, stdin=subprocess.PIPE, stdout=subprocess.PIPE ) as ffmpeg_process:
lowerCamelCase : Optional[int] = ffmpeg_process.communicate(lowerCamelCase )
except FileNotFoundError as error:
raise ValueError("""ffmpeg was not found but is required to load audio files from filename""" ) from error
lowerCamelCase : Union[str, Any] = output_stream[0]
lowerCamelCase : Optional[Any] = np.frombuffer(lowerCamelCase, np.floataa )
if audio.shape[0] == 0:
raise ValueError("""Malformed soundfile""" )
return audio
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase = "f32le", ):
lowerCamelCase : Dict = F'''{sampling_rate}'''
lowerCamelCase : List[Any] = """1"""
if format_for_conversion == "s16le":
lowerCamelCase : Any = 2
elif format_for_conversion == "f32le":
lowerCamelCase : Dict = 4
else:
raise ValueError(F'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
lowerCamelCase : Dict = platform.system()
if system == "Linux":
lowerCamelCase : Union[str, Any] = """alsa"""
lowerCamelCase : List[Any] = """default"""
elif system == "Darwin":
lowerCamelCase : List[Any] = """avfoundation"""
lowerCamelCase : List[Any] = """:0"""
elif system == "Windows":
lowerCamelCase : int = """dshow"""
lowerCamelCase : Any = """default"""
lowerCamelCase : Any = [
"""ffmpeg""",
"""-f""",
format_,
"""-i""",
input_,
"""-ac""",
ac,
"""-ar""",
ar,
"""-f""",
format_for_conversion,
"""-fflags""",
"""nobuffer""",
"""-hide_banner""",
"""-loglevel""",
"""quiet""",
"""pipe:1""",
]
lowerCamelCase : List[Any] = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
lowerCamelCase : Any = _ffmpeg_stream(lowerCamelCase, lowerCamelCase )
for item in iterator:
yield item
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = "f32le", ):
if stream_chunk_s is not None:
lowerCamelCase : int = stream_chunk_s
else:
lowerCamelCase : Dict = chunk_length_s
lowerCamelCase : Optional[Any] = ffmpeg_microphone(lowerCamelCase, lowerCamelCase, format_for_conversion=lowerCamelCase )
if format_for_conversion == "s16le":
lowerCamelCase : Optional[int] = np.intaa
lowerCamelCase : Optional[Any] = 2
elif format_for_conversion == "f32le":
lowerCamelCase : int = np.floataa
lowerCamelCase : Any = 4
else:
raise ValueError(F'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
if stride_length_s is None:
lowerCamelCase : Any = chunk_length_s / 6
lowerCamelCase : Any = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(lowerCamelCase, (int, float) ):
lowerCamelCase : Optional[int] = [stride_length_s, stride_length_s]
lowerCamelCase : Any = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
lowerCamelCase : Optional[int] = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
lowerCamelCase : List[Any] = datetime.datetime.now()
lowerCamelCase : List[Any] = datetime.timedelta(seconds=lowerCamelCase )
for item in chunk_bytes_iter(lowerCamelCase, lowerCamelCase, stride=(stride_left, stride_right), stream=lowerCamelCase ):
# Put everything back in numpy scale
lowerCamelCase : Dict = np.frombuffer(item["""raw"""], dtype=lowerCamelCase )
lowerCamelCase : List[Any] = (
item["""stride"""][0] // size_of_sample,
item["""stride"""][1] // size_of_sample,
)
lowerCamelCase : Tuple = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 10 * delta:
# We're late !! SKIP
continue
yield item
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase = False ):
lowerCamelCase : Optional[int] = B""""""
lowerCamelCase , lowerCamelCase : str = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
F'''Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}''' )
lowerCamelCase : str = 0
for raw in iterator:
acc += raw
if stream and len(lowerCamelCase ) < chunk_len:
lowerCamelCase : Optional[int] = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(lowerCamelCase ) >= chunk_len:
# We are flushing the accumulator
lowerCamelCase : str = (_stride_left, stride_right)
lowerCamelCase : Dict = {"""raw""": acc[:chunk_len], """stride""": stride}
if stream:
lowerCamelCase : Optional[int] = False
yield item
lowerCamelCase : str = stride_left
lowerCamelCase : Tuple = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(lowerCamelCase ) > stride_left:
lowerCamelCase : List[str] = {"""raw""": acc, """stride""": (_stride_left, 0)}
if stream:
lowerCamelCase : List[Any] = False
yield item
def _a ( lowerCamelCase, lowerCamelCase ):
lowerCamelCase : Optional[int] = 2**24 # 16 MB
try:
with subprocess.Popen(lowerCamelCase, stdout=subprocess.PIPE, bufsize=lowerCamelCase ) as ffmpeg_process:
while True:
lowerCamelCase : Any = ffmpeg_process.stdout.read(lowerCamelCase )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError("""ffmpeg was not found but is required to stream audio files from filename""" ) from error
| 681 | 1 |
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def _a ( lowerCamelCase = 3 ):
if isinstance(lowerCamelCase, str ):
raise TypeError("""number of qubits must be an integer.""" )
if number_of_qubits <= 0:
raise ValueError("""number of qubits must be > 0.""" )
if math.floor(lowerCamelCase ) != number_of_qubits:
raise ValueError("""number of qubits must be exact integer.""" )
if number_of_qubits > 10:
raise ValueError("""number of qubits too large to simulate(>10).""" )
lowerCamelCase : Union[str, Any] = QuantumRegister(lowerCamelCase, """qr""" )
lowerCamelCase : Optional[Any] = ClassicalRegister(lowerCamelCase, """cr""" )
lowerCamelCase : Optional[int] = QuantumCircuit(lowerCamelCase, lowerCamelCase )
lowerCamelCase : Dict = number_of_qubits
for i in range(lowerCamelCase ):
quantum_circuit.h(number_of_qubits - i - 1 )
counter -= 1
for j in range(lowerCamelCase ):
quantum_circuit.cp(np.pi / 2 ** (counter - j), lowerCamelCase, lowerCamelCase )
for k in range(number_of_qubits // 2 ):
quantum_circuit.swap(lowerCamelCase, number_of_qubits - k - 1 )
# measure all the qubits
quantum_circuit.measure(lowerCamelCase, lowerCamelCase )
# simulate with 10000 shots
lowerCamelCase : Any = Aer.get_backend("""qasm_simulator""" )
lowerCamelCase : List[Any] = execute(lowerCamelCase, lowerCamelCase, shots=1_0000 )
return job.result().get_counts(lowerCamelCase )
if __name__ == "__main__":
print(
f'''Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}'''
)
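# A dependency-light cross-check of what the circuit above computes: the QFT
# of |000> is a uniform superposition, so measuring 3 qubits should give all
# eight outcomes with probability 1/8. The same transform as a DFT matrix:
import numpy as np

n_qubits = 3
dim = 2 ** n_qubits
omega = np.exp(2j * np.pi / dim)
qft = np.array([[omega ** (j * k) for k in range(dim)] for j in range(dim)]) / np.sqrt(dim)
state = np.zeros(dim, dtype=complex)
state[0] = 1.0  # |000>
print(np.round(np.abs(qft @ state) ** 2, 3))  # [0.125 0.125 ... 0.125]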
| 681 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""")) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""")
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue_model_parallelism.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1600, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1600, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
])
class A__ ( unittest.TestCase):
def UpperCamelCase__ ( self ):
if self.framework == "pytorch":
subprocess.run(
F'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() , encoding="""utf-8""" , check=__magic_name__ , )
assert hasattr(self , """env""" )
def UpperCamelCase__ ( self , __magic_name__ ):
# configuration for running training on smdistributed Model Parallel
lowerCamelCase : Any = {
"""enabled""": True,
"""processes_per_host""": 8,
}
lowerCamelCase : Any = {
"""enabled""": True,
"""parameters""": {
"""microbatches""": 4,
"""placement_strategy""": """spread""",
"""pipeline""": """interleaved""",
"""optimize""": """speed""",
"""partitions""": 4,
"""ddp""": True,
},
}
lowerCamelCase : Optional[Any] = {"""smdistributed""": {"""modelparallel""": smp_options}, """mpi""": mpi_options}
lowerCamelCase : Dict = """trainer""" if self.script == """run_glue.py""" else """smtrainer"""
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F'''{self.env.base_job_name}-{instance_count}-smp-{name_extension}''' , instance_count=__magic_name__ , instance_type=self.instance_type , debugger_hook_config=__magic_name__ , hyperparameters={
**self.env.hyperparameters,
"""model_name_or_path""": self.model_name_or_path,
"""max_steps""": 5_0_0,
} , metric_definitions=self.env.metric_definitions , distribution=__magic_name__ , py_version="""py36""" , )
def UpperCamelCase__ ( self , __magic_name__ ):
TrainingJobAnalytics(__magic_name__ ).export_csv(F'''{self.env.test_path}/{job_name}_metrics.csv''' )
@parameterized.expand([(1,)] )
def UpperCamelCase__ ( self , __magic_name__ ):
# create estimator
lowerCamelCase : int = self.create_estimator(__magic_name__ )
# run training
estimator.fit()
# result dataframe
lowerCamelCase : Optional[Any] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
lowerCamelCase : Optional[Any] = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] )
lowerCamelCase : int = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
lowerCamelCase : int = (
Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 9_9_9_9_9_9 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy )
assert all(t <= self.results["""eval_loss"""] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F'''{estimator.latest_training_job.name}.json''' , """w""" ) as outfile:
json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , __magic_name__ )
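# A standalone sketch of the metric-extraction step above, using a made-up
# dataframe in place of the real TrainingJobAnalytics output; column names
# mirror the ones the test filters on.
import pandas as pd

df = pd.DataFrame(
    {"metric_name": ["eval_accuracy", "eval_loss", "eval_accuracy"],
     "value": [0.31, 1.15, 0.33]}
)
eval_accuracy = list(df[df.metric_name == "eval_accuracy"]["value"])
eval_loss = list(df[df.metric_name == "eval_loss"]["value"])
assert all(acc >= 0.3 for acc in eval_accuracy)
assert all(loss <= 1.2 for loss in eval_loss)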
| 681 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCamelCase ={
"""configuration_swinv2""": ["""SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Swinv2Config"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase =[
"""SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Swinv2ForImageClassification""",
"""Swinv2ForMaskedImageModeling""",
"""Swinv2Model""",
"""Swinv2PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _lowerCamelCase, module_spec=__spec__)
| 681 |
from __future__ import annotations
def is_palindrome( n ):
    n = str(n )
    return n == n[::-1]
def solution( limit = 100_0000 ):
    total = 0
    for i in range(1, limit ):
        if is_palindrome(i ) and is_palindrome(bin(i ).split("""b""" )[1] ):
            total += i
    return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
| 681 | 1 |
# flake8: noqa
# Lint as: python3
_lowerCamelCase =[
"""VerificationMode""",
"""Version""",
"""disable_progress_bar""",
"""enable_progress_bar""",
"""is_progress_bar_enabled""",
"""experimental""",
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 681 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys( config, base_model=False ):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''module.blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''module.blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(F'''module.blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''module.blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''module.blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("""module.cls_token""", """vit.embeddings.cls_token"""),
("""module.patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""module.patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""module.pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""module.norm.weight""", """layernorm.weight"""),
("""module.norm.bias""", """layernorm.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v( state_dict, config, base_model=False ):
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = """"""
        else:
            prefix = """vit."""
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'''module.blocks.{i}.attn.qkv.weight''' )
        in_proj_bias = state_dict.pop(F'''module.blocks.{i}.attn.qkv.bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.query.bias'''] = in_proj_bias[: config.hidden_size]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.key.bias'''] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.value.bias'''] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_( state_dict ):
    ignore_keys = ["""head.weight""", """head.bias"""]
    for k in ignore_keys:
        state_dict.pop(k, None )
def remove_projection_head( state_dict ):
    # the projection head is used during self-supervised pre-training in MSN;
    # it is not needed for downstream tasks
    ignore_keys = [
"""module.fc.fc1.weight""",
"""module.fc.fc1.bias""",
"""module.fc.bn1.weight""",
"""module.fc.bn1.bias""",
"""module.fc.bn1.running_mean""",
"""module.fc.bn1.running_var""",
"""module.fc.bn1.num_batches_tracked""",
"""module.fc.fc2.weight""",
"""module.fc.fc2.bias""",
"""module.fc.bn2.weight""",
"""module.fc.bn2.bias""",
"""module.fc.bn2.running_mean""",
"""module.fc.bn2.running_var""",
"""module.fc.bn2.num_batches_tracked""",
"""module.fc.fc3.weight""",
"""module.fc.fc3.bias""",
]
    for k in ignore_keys:
        state_dict.pop(k, None )
def rename_key( dct, old, new ):
    val = dct.pop(old )
    dct[new] = val
def convert_vit_msn_checkpoint( checkpoint_url, pytorch_dump_folder_path ):
    config = ViTMSNConfig()
    config.num_labels = 1000
    repo_id = """datasets/huggingface/label-files"""
    filename = """imagenet-1k-id2label.json"""
    id2label = json.load(open(hf_hub_download(repo_id, filename ), """r""" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    model = ViTMSNModel(config )
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="""cpu""" )["""target_encoder"""]
    image_processor = ViTImageProcessor(size=config.image_size )
    remove_projection_head(state_dict )
    rename_keys = create_rename_keys(config, base_model=True )
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest )
    read_in_q_k_v(state_dict, config, base_model=True )
    model.load_state_dict(state_dict )
    model.eval()
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    image = Image.open(requests.get(url, stream=True ).raw )
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD )
    inputs = image_processor(images=image, return_tensors="""pt""" )
    # forward pass
    torch.manual_seed(2 )
    outputs = model(**inputs )
    last_hidden_state = outputs.last_hidden_state
    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0_9_1_5, -1.4_8_7_6, -1.1_8_0_9]] )
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[1_4.2_8_8_9, -1_8.9_0_4_5, 1_1.7_2_8_1]] )
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[4_1.5_0_2_8, -2_2.8_6_8_1, 4_5.6_4_7_5]] )
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3_8_6_8, 5.2_9_3_2, -0.4_1_3_7]] )
    else:
        expected_slice = torch.tensor([[-0.1_7_9_2, -0.6_4_6_5, 2.4_2_6_3]] )
    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4 )
    print(F'''Saving model to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
_lowerCamelCase =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar""",
type=str,
help="""URL of the checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
_lowerCamelCase =parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 681 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_lowerCamelCase ={
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _lowerCamelCase["""image_processing_blip"""] = ["""BlipImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _lowerCamelCase["""modeling_blip"""] = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _lowerCamelCase["""modeling_tf_blip"""] = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _lowerCamelCase, module_spec=__spec__)
| 681 |
def _a ( num ):
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
| 681 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_lowerCamelCase ={
"""configuration_convnext""": ["""CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ConvNextConfig""", """ConvNextOnnxConfig"""]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _lowerCamelCase["""feature_extraction_convnext"""] = ["""ConvNextFeatureExtractor"""]
    _lowerCamelCase["""image_processing_convnext"""] = ["""ConvNextImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _lowerCamelCase["""modeling_convnext"""] = [
"""CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ConvNextForImageClassification""",
"""ConvNextModel""",
"""ConvNextPreTrainedModel""",
"""ConvNextBackbone""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _lowerCamelCase["""modeling_tf_convnext"""] = [
"""TFConvNextForImageClassification""",
"""TFConvNextModel""",
"""TFConvNextPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _lowerCamelCase)
| 681 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_lowerCamelCase ={
"""configuration_gpt_neox_japanese""": ["""GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoXJapaneseConfig"""],
"""tokenization_gpt_neox_japanese""": ["""GPTNeoXJapaneseTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _lowerCamelCase["""modeling_gpt_neox_japanese"""] = [
"""GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoXJapaneseForCausalLM""",
"""GPTNeoXJapaneseLayer""",
"""GPTNeoXJapaneseModel""",
"""GPTNeoXJapanesePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _lowerCamelCase, module_spec=__spec__)
| 681 | 1 |
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def remove_ignore_keys_( state_dict ):
    ignore_keys = [
        """decoder.version""",
        """decoder.output_projection.weight""",
        """_float_tensor""",
        """decoder.embed_positions._float_tensor""",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None )
def make_linear_from_emb( emb ):
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_xglm_checkpoint_from_disk( checkpoint_path ):
    checkpoint = torch.load(checkpoint_path, map_location="""cpu""" )
    args = Namespace(**checkpoint["""cfg"""]["""model"""] )
    state_dict = checkpoint["""model"""]
    remove_ignore_keys_(state_dict )
    vocab_size = state_dict["""decoder.embed_tokens.weight"""].shape[0]
    state_dict = {key.replace("""decoder""", """model""" ): val for key, val in state_dict.items()}
    config = XGLMConfig(
        vocab_size=vocab_size, max_position_embeddings=args.max_target_positions, num_layers=args.decoder_layers, attention_heads=args.decoder_attention_heads, ffn_dim=args.decoder_ffn_embed_dim, d_model=args.decoder_embed_dim, layerdrop=args.decoder_layerdrop, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function="""gelu""", scale_embedding=not args.no_scale_embedding, tie_word_embeddings=args.share_decoder_input_output_embed, )
    model = XGLMForCausalLM(config )
    missing = model.load_state_dict(state_dict, strict=False )
    print(missing )
    model.lm_head = make_linear_from_emb(model.model.embed_tokens )
    return model
if __name__ == "__main__":
_lowerCamelCase =argparse.ArgumentParser()
# Required parameters
parser.add_argument("""fairseq_path""", type=str, help="""path to a model.pt on local filesystem.""")
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
_lowerCamelCase =parser.parse_args()
_lowerCamelCase =convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 681 |
import copy
import random
from transformers import CLIPTokenizer
class MultiTokenCLIPTokenizer( CLIPTokenizer ):
    def __init__( self , *args , **kwargs ):
        super().__init__(*args , **kwargs )
        self.token_map = {}
    def try_adding_tokens( self , placeholder_token , *args , **kwargs ):
        num_added_tokens = super().add_tokens(placeholder_token , *args , **kwargs )
        if num_added_tokens == 0:
            raise ValueError(
                F'''The tokenizer already contains the token {placeholder_token}. Please pass a different'''
                """ `placeholder_token` that is not already in the tokenizer.""" )
    def add_placeholder_tokens( self , placeholder_token , *args , num_vec_per_token=1 , **kwargs ):
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token , *args , **kwargs )
            output.append(placeholder_token )
        else:
            output = []
            for i in range(num_vec_per_token ):
                ith_token = placeholder_token + F'''_{i}'''
                self.try_adding_tokens(ith_token , *args , **kwargs )
                output.append(ith_token )
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    F'''The tokenizer already has placeholder token {token} that can get confused with'''
                    F''' {placeholder_token}; keep placeholder tokens independent''' )
        self.token_map[placeholder_token] = output
    def replace_placeholder_tokens_in_text( self , text , vector_shuffle=False , prop_tokens_to_load=1.0 ):
        if isinstance(text , list ):
            output = []
            for i in range(len(text ) ):
                output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=vector_shuffle ) )
            return output
        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens ) * prop_tokens_to_load )]
                if vector_shuffle:
                    tokens = copy.copy(tokens )
                    random.shuffle(tokens )
                text = text.replace(placeholder_token , """ """.join(tokens ) )
        return text
    def __call__( self , text , *args , vector_shuffle=False , prop_tokens_to_load=1.0 , **kwargs ):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text , vector_shuffle=vector_shuffle , prop_tokens_to_load=prop_tokens_to_load ) , *args , **kwargs , )
    def encode( self , text , *args , vector_shuffle=False , prop_tokens_to_load=1.0 , **kwargs ):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text , vector_shuffle=vector_shuffle , prop_tokens_to_load=prop_tokens_to_load ) , *args , **kwargs , )
| 681 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"""andreasmadsen/efficient_mlm_m0.40""": (
"""https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"""
),
}
class RobertaPreLayerNormConfig( PretrainedConfig ):
    model_type = """roberta-prelayernorm"""
    def __init__( self , vocab_size=5_0_2_6_5 , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class RobertaPreLayerNormOnnxConfig( OnnxConfig ):
    @property
    def inputs( self ):
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
        return OrderedDict(
            [
                ("""input_ids""", dynamic_axis),
                ("""attention_mask""", dynamic_axis),
            ] )
| 681 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class A__ ( unittest.TestCase):
def __init__( self , __magic_name__ , __magic_name__=7 , __magic_name__=3 , __magic_name__=1_8 , __magic_name__=3_0 , __magic_name__=4_0_0 , __magic_name__=True , __magic_name__=None , __magic_name__=True , __magic_name__=None , __magic_name__=True , __magic_name__=[0.48_145_466, 0.4_578_275, 0.40_821_073] , __magic_name__=[0.26_862_954, 0.26_130_258, 0.27_577_711] , __magic_name__=True , ):
lowerCamelCase : Union[str, Any] = size if size is not None else {"""height""": 2_2_4, """width""": 2_2_4}
lowerCamelCase : str = crop_size if crop_size is not None else {"""height""": 1_8, """width""": 1_8}
lowerCamelCase : Optional[int] = parent
lowerCamelCase : Union[str, Any] = batch_size
lowerCamelCase : str = num_channels
lowerCamelCase : Any = image_size
lowerCamelCase : Optional[int] = min_resolution
lowerCamelCase : Union[str, Any] = max_resolution
lowerCamelCase : Union[str, Any] = do_resize
lowerCamelCase : int = size
lowerCamelCase : int = do_center_crop
lowerCamelCase : Union[str, Any] = crop_size
lowerCamelCase : Union[str, Any] = do_normalize
lowerCamelCase : Dict = image_mean
lowerCamelCase : Optional[Any] = image_std
lowerCamelCase : Union[str, Any] = do_convert_rgb
def UpperCamelCase__ ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
def UpperCamelCase__ ( self , __magic_name__=False , __magic_name__=False , __magic_name__=False ):
assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
if equal_resolution:
lowerCamelCase : Tuple = []
for i in range(self.batch_size ):
image_inputs.append(
np.random.randint(
2_5_5 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uinta ) )
else:
lowerCamelCase : Dict = []
for i in range(self.batch_size ):
lowerCamelCase , lowerCamelCase : int = np.random.choice(np.arange(self.min_resolution , self.max_resolution ) , 2 )
image_inputs.append(np.random.randint(2_5_5 , size=(self.num_channels, width, height) , dtype=np.uinta ) )
if not numpify and not torchify:
# PIL expects the channel dimension as last dimension
lowerCamelCase : int = [Image.fromarray(np.moveaxis(__magic_name__ , 0 , -1 ) ) for x in image_inputs]
if torchify:
lowerCamelCase : int = [torch.from_numpy(__magic_name__ ) for x in image_inputs]
return image_inputs
@require_torch
@require_vision
class A__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase):
_UpperCAmelCase : Any = ChineseCLIPImageProcessor if is_vision_available() else None
def UpperCamelCase__ ( self ):
lowerCamelCase : List[str] = ChineseCLIPImageProcessingTester(self , do_center_crop=__magic_name__ )
@property
def UpperCamelCase__ ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase__ ( self ):
lowerCamelCase : Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__magic_name__ , """do_resize""" ) )
self.assertTrue(hasattr(__magic_name__ , """size""" ) )
self.assertTrue(hasattr(__magic_name__ , """do_center_crop""" ) )
self.assertTrue(hasattr(__magic_name__ , """center_crop""" ) )
self.assertTrue(hasattr(__magic_name__ , """do_normalize""" ) )
self.assertTrue(hasattr(__magic_name__ , """image_mean""" ) )
self.assertTrue(hasattr(__magic_name__ , """image_std""" ) )
self.assertTrue(hasattr(__magic_name__ , """do_convert_rgb""" ) )
def UpperCamelCase__ ( self ):
lowerCamelCase : Tuple = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 2_2_4, """width""": 2_2_4} )
self.assertEqual(image_processor.crop_size , {"""height""": 1_8, """width""": 1_8} )
lowerCamelCase : List[str] = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 4_2} )
self.assertEqual(image_processor.crop_size , {"""height""": 8_4, """width""": 8_4} )
def UpperCamelCase__ ( self ):
pass
def UpperCamelCase__ ( self ):
# Initialize image_processing
lowerCamelCase : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase : Dict = self.image_processor_tester.prepare_inputs(equal_resolution=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , Image.Image )
# Test not batched input
lowerCamelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
lowerCamelCase : Optional[Any] = image_processing(__magic_name__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def UpperCamelCase__ ( self ):
# Initialize image_processing
lowerCamelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCamelCase : Dict = self.image_processor_tester.prepare_inputs(equal_resolution=__magic_name__ , numpify=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , np.ndarray )
# Test not batched input
lowerCamelCase : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
lowerCamelCase : Tuple = image_processing(__magic_name__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def UpperCamelCase__ ( self ):
# Initialize image_processing
lowerCamelCase : str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCamelCase : Any = self.image_processor_tester.prepare_inputs(equal_resolution=__magic_name__ , torchify=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , torch.Tensor )
# Test not batched input
lowerCamelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
lowerCamelCase : str = image_processing(__magic_name__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
@require_torch
@require_vision
class A__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase):
_UpperCAmelCase : Tuple = ChineseCLIPImageProcessor if is_vision_available() else None
def UpperCamelCase__ ( self ):
lowerCamelCase : Union[str, Any] = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=__magic_name__ )
lowerCamelCase : Any = 3
@property
def UpperCamelCase__ ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase__ ( self ):
lowerCamelCase : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__magic_name__ , """do_resize""" ) )
self.assertTrue(hasattr(__magic_name__ , """size""" ) )
self.assertTrue(hasattr(__magic_name__ , """do_center_crop""" ) )
self.assertTrue(hasattr(__magic_name__ , """center_crop""" ) )
self.assertTrue(hasattr(__magic_name__ , """do_normalize""" ) )
self.assertTrue(hasattr(__magic_name__ , """image_mean""" ) )
self.assertTrue(hasattr(__magic_name__ , """image_std""" ) )
self.assertTrue(hasattr(__magic_name__ , """do_convert_rgb""" ) )
def UpperCamelCase__ ( self ):
pass
def UpperCamelCase__ ( self ):
# Initialize image_processing
lowerCamelCase : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase : Dict = self.image_processor_tester.prepare_inputs(equal_resolution=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , Image.Image )
# Test not batched input
lowerCamelCase : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
lowerCamelCase : Optional[Any] = image_processing(__magic_name__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
| 681 | 1 |
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class A__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase):
_UpperCAmelCase : Dict = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
_UpperCAmelCase : Any = (
{
"""feature-extraction""": TFMobileBertModel,
"""fill-mask""": TFMobileBertForMaskedLM,
"""question-answering""": TFMobileBertForQuestionAnswering,
"""text-classification""": TFMobileBertForSequenceClassification,
"""token-classification""": TFMobileBertForTokenClassification,
"""zero-shot""": TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
_UpperCAmelCase : int = False
_UpperCAmelCase : List[Any] = False
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__=False ):
lowerCamelCase : int = super()._prepare_for_class(__magic_name__ , __magic_name__ , return_labels=__magic_name__ )
if return_labels:
if model_class in get_values(__magic_name__ ):
lowerCamelCase : Dict = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
return inputs_dict
class A__ ( __SCREAMING_SNAKE_CASE):
def __init__( self , __magic_name__ , __magic_name__=1_3 , __magic_name__=7 , __magic_name__=True , __magic_name__=True , __magic_name__=True , __magic_name__=True , __magic_name__=9_9 , __magic_name__=3_2 , __magic_name__=3_2 , __magic_name__=2 , __magic_name__=4 , __magic_name__=3_7 , __magic_name__="gelu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=5_1_2 , __magic_name__=1_6 , __magic_name__=2 , __magic_name__=0.02 , __magic_name__=3 , __magic_name__=4 , __magic_name__=None , ):
lowerCamelCase : Any = parent
lowerCamelCase : int = batch_size
lowerCamelCase : Dict = seq_length
lowerCamelCase : Union[str, Any] = is_training
lowerCamelCase : int = use_input_mask
lowerCamelCase : Union[str, Any] = use_token_type_ids
lowerCamelCase : int = use_labels
lowerCamelCase : Dict = vocab_size
lowerCamelCase : Union[str, Any] = hidden_size
lowerCamelCase : str = num_hidden_layers
lowerCamelCase : Any = num_attention_heads
lowerCamelCase : Optional[Any] = intermediate_size
lowerCamelCase : str = hidden_act
lowerCamelCase : List[Any] = hidden_dropout_prob
lowerCamelCase : Tuple = attention_probs_dropout_prob
lowerCamelCase : List[str] = max_position_embeddings
lowerCamelCase : Tuple = type_vocab_size
lowerCamelCase : int = type_sequence_label_size
lowerCamelCase : Dict = initializer_range
lowerCamelCase : Union[str, Any] = num_labels
lowerCamelCase : Optional[int] = num_choices
lowerCamelCase : Union[str, Any] = scope
lowerCamelCase : Tuple = embedding_size
def UpperCamelCase__ ( self ):
lowerCamelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase : Optional[Any] = None
if self.use_input_mask:
lowerCamelCase : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase : Dict = None
if self.use_token_type_ids:
lowerCamelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase : Any = None
lowerCamelCase : Optional[Any] = None
lowerCamelCase : Union[str, Any] = None
if self.use_labels:
lowerCamelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase : List[Any] = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase : Any = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
lowerCamelCase : List[Any] = TFMobileBertModel(config=__magic_name__ )
lowerCamelCase : str = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
lowerCamelCase : Dict = model(__magic_name__ )
lowerCamelCase : Optional[Any] = [input_ids, input_mask]
lowerCamelCase : List[Any] = model(__magic_name__ )
lowerCamelCase : Dict = model(__magic_name__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
lowerCamelCase : Optional[int] = TFMobileBertForMaskedLM(config=__magic_name__ )
lowerCamelCase : str = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
lowerCamelCase : List[Any] = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
lowerCamelCase : Union[str, Any] = TFMobileBertForNextSentencePrediction(config=__magic_name__ )
lowerCamelCase : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
lowerCamelCase : Optional[Any] = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
lowerCamelCase : List[Any] = TFMobileBertForPreTraining(config=__magic_name__ )
lowerCamelCase : Optional[int] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
lowerCamelCase : Optional[int] = model(__magic_name__ )
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
lowerCamelCase : str = self.num_labels
lowerCamelCase : int = TFMobileBertForSequenceClassification(config=__magic_name__ )
lowerCamelCase : Any = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
lowerCamelCase : Union[str, Any] = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
lowerCamelCase : List[str] = self.num_choices
lowerCamelCase : Tuple = TFMobileBertForMultipleChoice(config=__magic_name__ )
lowerCamelCase : Tuple = tf.tile(tf.expand_dims(__magic_name__ , 1 ) , (1, self.num_choices, 1) )
lowerCamelCase : Optional[int] = tf.tile(tf.expand_dims(__magic_name__ , 1 ) , (1, self.num_choices, 1) )
lowerCamelCase : Optional[int] = tf.tile(tf.expand_dims(__magic_name__ , 1 ) , (1, self.num_choices, 1) )
lowerCamelCase : Any = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
lowerCamelCase : Dict = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
lowerCamelCase : Optional[int] = self.num_labels
lowerCamelCase : Dict = TFMobileBertForTokenClassification(config=__magic_name__ )
lowerCamelCase : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
lowerCamelCase : Union[str, Any] = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
lowerCamelCase : List[Any] = TFMobileBertForQuestionAnswering(config=__magic_name__ )
lowerCamelCase : Dict = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
lowerCamelCase : Optional[int] = model(__magic_name__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase__ ( self ):
lowerCamelCase : Union[str, Any] = self.prepare_config_and_inputs()
(
(
lowerCamelCase
) , (
lowerCamelCase
) , (
lowerCamelCase
) , (
lowerCamelCase
) , (
lowerCamelCase
) , (
lowerCamelCase
) , (
lowerCamelCase
) ,
) : Tuple = config_and_inputs
lowerCamelCase : Optional[Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
def UpperCamelCase__ ( self ):
lowerCamelCase : int = TFMobileBertModelTest.TFMobileBertModelTester(self )
lowerCamelCase : Union[str, Any] = ConfigTester(self , config_class=__magic_name__ , hidden_size=3_7 )
def UpperCamelCase__ ( self ):
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*__magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*__magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*__magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*__magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*__magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*__magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*__magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*__magic_name__ )
@slow
def UpperCamelCase__ ( self ):
# for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["google/mobilebert-uncased"]:
lowerCamelCase : Tuple = TFMobileBertModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
@require_tf
class A__ ( unittest.TestCase):
@slow
def UpperCamelCase__ ( self ):
lowerCamelCase : Union[str, Any] = TFMobileBertForPreTraining.from_pretrained("""google/mobilebert-uncased""" )
lowerCamelCase : Any = tf.constant([[0, 1, 2, 3, 4, 5]] )
lowerCamelCase : List[str] = model(__magic_name__ )[0]
lowerCamelCase : Tuple = [1, 6, 3_0_5_2_2]
self.assertEqual(output.shape , __magic_name__ )
lowerCamelCase : List[str] = tf.constant(
[
[
[-4.5_919_547, -9.248_295, -9.645_256],
[-6.7_306_175, -6.440_284, -6.6_052_837],
[-7.2_743_506, -6.7_847_915, -6.024_673],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __magic_name__ , atol=1e-4 )
| 681 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A__ :
def __init__( self , __magic_name__ , __magic_name__=3 , __magic_name__=3_2 , __magic_name__=3 , __magic_name__=1_0 , __magic_name__=[1_0, 2_0, 3_0, 4_0] , __magic_name__=[1, 1, 2, 1] , __magic_name__=True , __magic_name__=True , __magic_name__="relu" , __magic_name__=3 , __magic_name__=None , ):
lowerCamelCase : Tuple = parent
lowerCamelCase : Tuple = batch_size
lowerCamelCase : List[Any] = image_size
lowerCamelCase : Optional[Any] = num_channels
lowerCamelCase : Dict = embeddings_size
lowerCamelCase : Optional[int] = hidden_sizes
lowerCamelCase : Union[str, Any] = depths
lowerCamelCase : Optional[Any] = is_training
lowerCamelCase : Union[str, Any] = use_labels
lowerCamelCase : Dict = hidden_act
lowerCamelCase : Any = num_labels
lowerCamelCase : int = scope
lowerCamelCase : Optional[Any] = len(__magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase : Tuple = None
if self.use_labels:
lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels )
lowerCamelCase : Tuple = self.get_config()
return config, pixel_values, labels
def UpperCamelCase__ ( self ):
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ ):
lowerCamelCase : Dict = TFResNetModel(config=__magic_name__ )
lowerCamelCase : Tuple = model(__magic_name__ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ ):
lowerCamelCase : str = self.num_labels
lowerCamelCase : Dict = TFResNetForImageClassification(__magic_name__ )
lowerCamelCase : Union[str, Any] = model(__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[int] = self.prepare_config_and_inputs()
lowerCamelCase , lowerCamelCase , lowerCamelCase : Union[str, Any] = config_and_inputs
lowerCamelCase : List[str] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class A__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase):
_UpperCAmelCase : Any = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
_UpperCAmelCase : List[str] = (
{"""feature-extraction""": TFResNetModel, """image-classification""": TFResNetForImageClassification}
if is_tf_available()
else {}
)
_UpperCAmelCase : Optional[Any] = False
_UpperCAmelCase : Optional[Any] = False
_UpperCAmelCase : Dict = False
_UpperCAmelCase : List[Any] = False
_UpperCAmelCase : Any = False
def UpperCamelCase__ ( self ):
lowerCamelCase : int = TFResNetModelTester(self )
lowerCamelCase : str = ConfigTester(self , config_class=__magic_name__ , has_text_modality=__magic_name__ )
def UpperCamelCase__ ( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase__ ( self ):
return
@unittest.skip(reason="""ResNet does not use inputs_embeds""" )
def UpperCamelCase__ ( self ):
pass
@unittest.skip(reason="""ResNet does not support input and output embeddings""" )
def UpperCamelCase__ ( self ):
pass
def UpperCamelCase__ ( self ):
lowerCamelCase , lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase : List[str] = model_class(__magic_name__ )
lowerCamelCase : str = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase : Tuple = [*signature.parameters.keys()]
lowerCamelCase : List[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def UpperCamelCase__ ( self ):
def check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ ):
lowerCamelCase : Any = model_class(__magic_name__ )
lowerCamelCase : List[Any] = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
lowerCamelCase : Dict = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCamelCase : Union[str, Any] = self.model_tester.num_stages
self.assertEqual(len(__magic_name__ ) , expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowerCamelCase , lowerCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase : Tuple = ["""basic""", """bottleneck"""]
for model_class in self.all_model_classes:
for layer_type in layers_type:
lowerCamelCase : Union[str, Any] = layer_type
lowerCamelCase : str = True
check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase : int = True
check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__magic_name__ )
@slow
def UpperCamelCase__ ( self ):
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase : Any = TFResNetModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
def _a ( ):
lowerCamelCase : Dict = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class A__ ( unittest.TestCase):
@cached_property
def UpperCamelCase__ ( self ):
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def UpperCamelCase__ ( self ):
lowerCamelCase : Tuple = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
lowerCamelCase : List[str] = self.default_image_processor
lowerCamelCase : str = prepare_img()
lowerCamelCase : Tuple = image_processor(images=__magic_name__ , return_tensors="""tf""" )
# forward pass
lowerCamelCase : Tuple = model(**__magic_name__ )
# verify the logits
lowerCamelCase : Optional[Any] = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , __magic_name__ )
lowerCamelCase : Optional[Any] = tf.constant([-11.1_069, -9.7_877, -8.3_777] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , __magic_name__ , atol=1e-4 ) )
| 681 | 1 |
def gray_code( bit_count ):
    # bit_count represents the number of bits in the gray code
    if bit_count < 0:
        raise ValueError("""The given input must be non-negative""" )
    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count )
    # convert the binary strings to integers
    for i in range(len(sequence ) ):
        sequence[i] = int(sequence[i], 2 )
    return sequence
def gray_code_sequence_string( bit_count ):
    # The approach is a recursive one
    # Base case achieved when either n = 0 or n = 1
    if bit_count == 0:
        return ["0"]
    if bit_count == 1:
        return ["0", "1"]
    seq_len = 1 << bit_count  # defines the length of the sequence; 1 << n is equivalent to 2^n
    # the recursive call generates the answer for n - 1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1 )
    sequence = []
    # prepend 0 to the first half of the smaller sequence
    for i in range(seq_len // 2 ):
        sequence.append("""0""" + smaller_sequence[i] )
    # prepend 1 to the second half, traversed from the end of the list
    for i in reversed(range(seq_len // 2 ) ):
        sequence.append("""1""" + smaller_sequence[i] )
    return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
| 681 |
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path ):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file )
    print(F'''Building PyTorch model from configuration: {config}''' )
    model = MobileBertForPreTraining(config )
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path )
    # Save pytorch-model
    print(F'''Save PyTorch model to {pytorch_dump_path}''' )
    torch.save(model.state_dict(), pytorch_dump_path )
if __name__ == "__main__":
_lowerCamelCase =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--mobilebert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained MobileBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
_lowerCamelCase =parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
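# Example invocation (illustrative; the paths are placeholders and the script
# file name is assumed, not taken from this file):
#
#   python convert_mobilebert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/mobilebert/model.ckpt \
#       --mobilebert_config_file /path/to/mobilebert/config.json \
#       --pytorch_dump_path /path/to/output/pytorch_model.bin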
| 681 | 1 |
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A__ :
def __init__( self , __magic_name__ , __magic_name__=3 , __magic_name__=3_2 , __magic_name__=3 , __magic_name__=1_0 , __magic_name__=[1_0, 2_0, 3_0, 4_0] , __magic_name__=[1, 1, 2, 1] , __magic_name__=True , __magic_name__=True , __magic_name__="relu" , __magic_name__=3 , __magic_name__=None , ):
lowerCamelCase : int = parent
lowerCamelCase : List[str] = batch_size
lowerCamelCase : Optional[Any] = image_size
lowerCamelCase : Optional[Any] = num_channels
lowerCamelCase : Optional[Any] = embeddings_size
lowerCamelCase : Union[str, Any] = hidden_sizes
lowerCamelCase : Any = depths
lowerCamelCase : Dict = is_training
lowerCamelCase : List[Any] = use_labels
lowerCamelCase : int = hidden_act
lowerCamelCase : Tuple = num_labels
lowerCamelCase : int = scope
lowerCamelCase : Optional[int] = len(__magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase : int = None
if self.use_labels:
lowerCamelCase : Optional[int] = ids_tensor([self.batch_size] , self.num_labels )
lowerCamelCase : str = self.get_config()
return config, pixel_values, labels
def UpperCamelCase__ ( self ):
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ ):
lowerCamelCase : int = RegNetModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
lowerCamelCase : str = model(__magic_name__ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ ):
lowerCamelCase : str = self.num_labels
lowerCamelCase : Dict = RegNetForImageClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
lowerCamelCase : str = model(__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase__ ( self ):
lowerCamelCase : Dict = self.prepare_config_and_inputs()
lowerCamelCase , lowerCamelCase , lowerCamelCase : Any = config_and_inputs
lowerCamelCase : Optional[int] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class A__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase):
_UpperCAmelCase : str = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
_UpperCAmelCase : Tuple = (
{"""feature-extraction""": RegNetModel, """image-classification""": RegNetForImageClassification}
if is_torch_available()
else {}
)
_UpperCAmelCase : List[Any] = False
_UpperCAmelCase : int = False
_UpperCAmelCase : str = False
_UpperCAmelCase : List[Any] = False
def UpperCamelCase__ ( self ):
lowerCamelCase : List[str] = RegNetModelTester(self )
lowerCamelCase : str = ConfigTester(self , config_class=__magic_name__ , has_text_modality=__magic_name__ )
def UpperCamelCase__ ( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase__ ( self ):
return
@unittest.skip(reason="""RegNet does not use inputs_embeds""" )
def UpperCamelCase__ ( self ):
pass
@unittest.skip(reason="""RegNet does not support input and output embeddings""" )
def UpperCamelCase__ ( self ):
pass
def UpperCamelCase__ ( self ):
lowerCamelCase , lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase : List[str] = model_class(__magic_name__ )
lowerCamelCase : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase : List[str] = [*signature.parameters.keys()]
lowerCamelCase : Union[str, Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase , lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase : Optional[int] = model_class(config=__magic_name__ )
for name, module in model.named_modules():
if isinstance(__magic_name__ , (nn.BatchNorm2d, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
def UpperCamelCase__ ( self ):
def check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ ):
lowerCamelCase : Tuple = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
with torch.no_grad():
lowerCamelCase : Optional[int] = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
lowerCamelCase : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCamelCase : Optional[int] = self.model_tester.num_stages
self.assertEqual(len(__magic_name__ ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
lowerCamelCase , lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase : Any = ["""basic""", """bottleneck"""]
for model_class in self.all_model_classes:
for layer_type in layers_type:
lowerCamelCase : str = layer_type
lowerCamelCase : Tuple = True
check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase : int = True
check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__magic_name__ )
@slow
def UpperCamelCase__ ( self ):
for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase : str = RegNetModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
def _a ( ):
lowerCamelCase : Dict = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class A__ ( unittest.TestCase):
@cached_property
def UpperCamelCase__ ( self ):
return (
AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def UpperCamelCase__ ( self ):
lowerCamelCase : Tuple = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(__magic_name__ )
lowerCamelCase : int = self.default_image_processor
lowerCamelCase : str = prepare_img()
lowerCamelCase : Optional[Any] = image_processor(images=__magic_name__ , return_tensors="""pt""" ).to(__magic_name__ )
# forward pass
with torch.no_grad():
lowerCamelCase : Any = model(**__magic_name__ )
# verify the logits
lowerCamelCase : Union[str, Any] = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , __magic_name__ )
lowerCamelCase : List[Any] = torch.tensor([-0.4_180, -1.5_051, -3.4_836] ).to(__magic_name__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __magic_name__ , atol=1e-4 ) )
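# Illustrative end-to-end usage mirroring the integration test above (comments
# only; the checkpoint id comes from REGNET_PRETRAINED_MODEL_ARCHIVE_LIST,
# imported earlier in this file):
#
#   model = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
#   processor = AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
#   inputs = processor(images=prepare_img(), return_tensors="pt")
#   logits = model(**inputs).logits  # expected shape: (1, 1000)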
| 681 |
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def _a ( lowerCamelCase ):
# vision encoder
if "img_encoder.pos_embed" in name:
lowerCamelCase : Tuple = name.replace("""img_encoder.pos_embed""", """vision_model.embeddings.position_embeddings""" )
if "img_encoder.patch_embed.proj" in name:
lowerCamelCase : Union[str, Any] = name.replace("""img_encoder.patch_embed.proj""", """vision_model.embeddings.patch_embeddings.projection""" )
if "img_encoder.patch_embed.norm" in name:
lowerCamelCase : Optional[int] = name.replace("""img_encoder.patch_embed.norm""", """vision_model.embeddings.layernorm""" )
if "img_encoder.layers" in name:
lowerCamelCase : List[str] = name.replace("""img_encoder.layers""", """vision_model.encoder.stages""" )
if "blocks" in name and "res" not in name:
lowerCamelCase : List[Any] = name.replace("""blocks""", """layers""" )
if "attn" in name and "pre_assign" not in name:
lowerCamelCase : Optional[int] = name.replace("""attn""", """self_attn""" )
if "proj" in name and "self_attn" in name and "text" not in name:
lowerCamelCase : Optional[int] = name.replace("""proj""", """out_proj""" )
if "pre_assign_attn.attn.proj" in name:
lowerCamelCase : Any = name.replace("""pre_assign_attn.attn.proj""", """pre_assign_attn.attn.out_proj""" )
if "norm1" in name:
lowerCamelCase : Optional[Any] = name.replace("""norm1""", """layer_norm1""" )
if "norm2" in name and "pre_assign" not in name:
lowerCamelCase : Union[str, Any] = name.replace("""norm2""", """layer_norm2""" )
if "img_encoder.norm" in name:
lowerCamelCase : Optional[int] = name.replace("""img_encoder.norm""", """vision_model.layernorm""" )
# text encoder
if "text_encoder.token_embedding" in name:
lowerCamelCase : int = name.replace("""text_encoder.token_embedding""", """text_model.embeddings.token_embedding""" )
if "text_encoder.positional_embedding" in name:
lowerCamelCase : Optional[Any] = name.replace("""text_encoder.positional_embedding""", """text_model.embeddings.position_embedding.weight""" )
if "text_encoder.transformer.resblocks." in name:
lowerCamelCase : Optional[Any] = name.replace("""text_encoder.transformer.resblocks.""", """text_model.encoder.layers.""" )
if "ln_1" in name:
lowerCamelCase : Optional[Any] = name.replace("""ln_1""", """layer_norm1""" )
if "ln_2" in name:
lowerCamelCase : str = name.replace("""ln_2""", """layer_norm2""" )
if "c_fc" in name:
lowerCamelCase : Any = name.replace("""c_fc""", """fc1""" )
if "c_proj" in name:
lowerCamelCase : Tuple = name.replace("""c_proj""", """fc2""" )
if "text_encoder" in name:
lowerCamelCase : List[str] = name.replace("""text_encoder""", """text_model""" )
if "ln_final" in name:
lowerCamelCase : Tuple = name.replace("""ln_final""", """final_layer_norm""" )
# projection layers
if "img_projector.linear_hidden." in name:
lowerCamelCase : Optional[int] = name.replace("""img_projector.linear_hidden.""", """visual_projection.""" )
if "img_projector.linear_out." in name:
lowerCamelCase : Tuple = name.replace("""img_projector.linear_out.""", """visual_projection.3.""" )
if "text_projector.linear_hidden" in name:
lowerCamelCase : Tuple = name.replace("""text_projector.linear_hidden""", """text_projection""" )
if "text_projector.linear_out" in name:
lowerCamelCase : Tuple = name.replace("""text_projector.linear_out""", """text_projection.3""" )
return name
def _a ( lowerCamelCase, lowerCamelCase ):
for key in orig_state_dict.copy().keys():
lowerCamelCase : Tuple = orig_state_dict.pop(lowerCamelCase )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
lowerCamelCase : Any = key.split(""".""" )
lowerCamelCase , lowerCamelCase : Optional[Any] = int(key_split[2] ), int(key_split[4] )
lowerCamelCase : List[Any] = config.vision_config.hidden_size
if "weight" in key:
lowerCamelCase : int = val[:dim, :]
lowerCamelCase : List[str] = val[dim : dim * 2, :]
lowerCamelCase : Dict = val[-dim:, :]
else:
lowerCamelCase : List[Any] = val[:dim]
lowerCamelCase : List[Any] = val[dim : dim * 2]
lowerCamelCase : Tuple = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
lowerCamelCase : str = key.split(""".""" )
lowerCamelCase : Optional[int] = int(key_split[3] )
lowerCamelCase : List[str] = config.text_config.hidden_size
if "weight" in key:
lowerCamelCase : Optional[int] = val[:dim, :]
lowerCamelCase : Any = val[
dim : dim * 2, :
]
lowerCamelCase : Optional[Any] = val[-dim:, :]
else:
lowerCamelCase : Union[str, Any] = val[:dim]
lowerCamelCase : Optional[int] = val[dim : dim * 2]
lowerCamelCase : Union[str, Any] = val[-dim:]
else:
lowerCamelCase : List[Any] = rename_key(lowerCamelCase )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
lowerCamelCase : Any = val.squeeze_()
else:
lowerCamelCase : Union[str, Any] = val
return orig_state_dict
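# Illustrative sketch (added; not part of the conversion script): how a fused
# qkv projection weight of shape (3 * dim, dim) is split into separate q, k and
# v matrices, as done for the "qkv" keys above. `dim` and the example tensor in
# the comment are stand-ins, not real checkpoint values.
def _split_fused_qkv(fused_weight, dim):
    # rows [0, dim) are q, rows [dim, 2*dim) are k, and the last dim rows are v
    q = fused_weight[:dim, :]
    k = fused_weight[dim : dim * 2, :]
    v = fused_weight[-dim:, :]
    return q, k, v
# e.g. _split_fused_qkv(torch.randn(3 * 8, 8), 8) yields three (8, 8) matrices.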
def _a ( ):
lowerCamelCase : Optional[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowerCamelCase : List[str] = Image.open(requests.get(lowerCamelCase, stream=lowerCamelCase ).raw )
return im
@torch.no_grad()
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase="groupvit-gcc-yfcc", lowerCamelCase=False ):
lowerCamelCase : int = GroupViTConfig()
lowerCamelCase : Dict = GroupViTModel(lowerCamelCase ).eval()
lowerCamelCase : Optional[int] = torch.load(lowerCamelCase, map_location="""cpu""" )["""model"""]
lowerCamelCase : Tuple = convert_state_dict(lowerCamelCase, lowerCamelCase )
lowerCamelCase , lowerCamelCase : Tuple = model.load_state_dict(lowerCamelCase, strict=lowerCamelCase )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(lowerCamelCase ) == 0)
# verify result
lowerCamelCase : int = CLIPProcessor.from_pretrained("""openai/clip-vit-base-patch32""" )
lowerCamelCase : int = prepare_img()
lowerCamelCase : int = processor(text=["""a photo of a cat""", """a photo of a dog"""], images=lowerCamelCase, padding=lowerCamelCase, return_tensors="""pt""" )
with torch.no_grad():
lowerCamelCase : int = model(**lowerCamelCase )
if model_name == "groupvit-gcc-yfcc":
lowerCamelCase : Any = torch.tensor([[1_3.3_5_2_3, 6.3_6_2_9]] )
elif model_name == "groupvit-gcc-redcaps":
lowerCamelCase : Any = torch.tensor([[1_6.1_8_7_3, 8.6_2_3_0]] )
else:
raise ValueError(F'''Model name {model_name} not supported.''' )
assert torch.allclose(outputs.logits_per_image, lowerCamelCase, atol=1e-3 )
processor.save_pretrained(lowerCamelCase )
model.save_pretrained(lowerCamelCase )
print("""Successfully saved processor and model to""", lowerCamelCase )
if push_to_hub:
print("""Pushing to the hub...""" )
processor.push_to_hub(lowerCamelCase, organization="""nielsr""" )
model.push_to_hub(lowerCamelCase, organization="""nielsr""" )
if __name__ == "__main__":
_lowerCamelCase =argparse.ArgumentParser()
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to dump the processor and PyTorch model."""
)
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to GroupViT checkpoint""")
parser.add_argument(
"""--model_name""",
default="""groupvit-gccy-fcc""",
type=str,
help="""Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.""",
)
_lowerCamelCase =parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
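# Example invocation (illustrative; the checkpoint path is a placeholder and the
# script file name is assumed):
#
#   python convert_groupvit_nvlab_to_hf.py \
#       --checkpoint_path /path/to/group_vit_gcc_yfcc.pth \
#       --model_name groupvit-gcc-yfcc \
#       --pytorch_dump_folder_path /path/to/dump \
#       --push_to_hub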
| 681 | 1 |
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
_lowerCamelCase ="""\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
"""
_lowerCamelCase ="""\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
"""
_lowerCamelCase ="""
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for 'record': list of question-answer dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'prediction_text': the predicted answer text
- for 'multirc': list of question-answer dictionaries with the following keys:
- 'idx': index of the question-answer pair as specified by the dataset
- 'prediction': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for 'record': list of question-answers dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'answers': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for 'record':
- 'exact_match': Exact match between answer and gold answer
- 'f1': F1 score
- for 'multirc':
- 'exact_match': Exact match between answer and gold answer
- 'f1_m': Per-question macro-F1 score
- 'f1_a': Average F1 score over all answers
- for 'axb':
'matthews_correlation': Matthew Correlation
- for 'cb':
- 'accuracy': Accuracy
- 'f1': F1 score
- for all others:
- 'accuracy': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric('super_glue', 'copa') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'cb')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'record')
>>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]
>>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')
>>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'axb')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'matthews_correlation': 1.0}
"""
def _a ( lowerCamelCase, lowerCamelCase ):
return float((preds == labels).mean() )
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase="binary" ):
lowerCamelCase : Union[str, Any] = simple_accuracy(lowerCamelCase, lowerCamelCase )
lowerCamelCase : Dict = float(f1_score(y_true=lowerCamelCase, y_pred=lowerCamelCase, average=lowerCamelCase ) )
return {
"accuracy": acc,
"f1": fa,
}
def _a ( lowerCamelCase, lowerCamelCase ):
lowerCamelCase : Any = {}
for id_pred, label in zip(lowerCamelCase, lowerCamelCase ):
lowerCamelCase : Any = F'''{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}'''
lowerCamelCase : Any = id_pred["""prediction"""]
if question_id in question_map:
question_map[question_id].append((pred, label) )
else:
lowerCamelCase : List[Any] = [(pred, label)]
lowerCamelCase , lowerCamelCase : int = [], []
for question, preds_labels in question_map.items():
lowerCamelCase , lowerCamelCase : str = zip(*lowerCamelCase )
lowerCamelCase : List[str] = f1_score(y_true=lowerCamelCase, y_pred=lowerCamelCase, average="""macro""" )
fas.append(lowerCamelCase )
lowerCamelCase : str = int(sum(pred == label for pred, label in preds_labels ) == len(lowerCamelCase ) )
ems.append(lowerCamelCase )
lowerCamelCase : Any = float(sum(lowerCamelCase ) / len(lowerCamelCase ) )
lowerCamelCase : List[str] = sum(lowerCamelCase ) / len(lowerCamelCase )
lowerCamelCase : Dict = float(f1_score(y_true=lowerCamelCase, y_pred=[id_pred["""prediction"""] for id_pred in ids_preds] ) )
return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class A__ ( datasets.Metric):
def UpperCamelCase__ ( self ):
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" if not self.config_name == """record""" and not self.config_name == """multirc""" else None , )
def UpperCamelCase__ ( self ):
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"prediction_text": datasets.Value("""string""" ),
},
"references": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"answers": datasets.Sequence(datasets.Value("""string""" ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value("""int64""" ),
"paragraph": datasets.Value("""int64""" ),
"question": datasets.Value("""int64""" ),
},
"prediction": datasets.Value("""int64""" ),
},
"references": datasets.Value("""int64""" ),
}
else:
return {
"predictions": datasets.Value("""int64""" ),
"references": datasets.Value("""int64""" ),
}
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ ):
if self.config_name == "axb":
return {"matthews_correlation": matthews_corrcoef(__magic_name__ , __magic_name__ )}
elif self.config_name == "cb":
return acc_and_fa(__magic_name__ , __magic_name__ , fa_avg="""macro""" )
elif self.config_name == "record":
lowerCamelCase : Optional[Any] = [
{
"""qas""": [
{"""id""": ref["""idx"""]["""query"""], """answers""": [{"""text""": ans} for ans in ref["""answers"""]]}
for ref in references
]
}
]
lowerCamelCase : int = {pred["""idx"""]["""query"""]: pred["""prediction_text"""] for pred in predictions}
return evaluate_record(__magic_name__ , __magic_name__ )[0]
elif self.config_name == "multirc":
return evaluate_multirc(__magic_name__ , __magic_name__ )
elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
return {"accuracy": simple_accuracy(__magic_name__ , __magic_name__ )}
else:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
| 681 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class A__ :
# setable values
_UpperCAmelCase : Optional[int] = None
_UpperCAmelCase : Optional[jnp.ndarray] = None
_UpperCAmelCase : Optional[jnp.ndarray] = None # sigma(t_i)
@classmethod
def UpperCamelCase__ ( cls ):
return cls()
@dataclass
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : jnp.ndarray
_UpperCAmelCase : jnp.ndarray
_UpperCAmelCase : KarrasVeSchedulerState
class A__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
@property
def UpperCamelCase__ ( self ):
return True
@register_to_config
def __init__( self , __magic_name__ = 0.02 , __magic_name__ = 1_0_0 , __magic_name__ = 1.007 , __magic_name__ = 8_0 , __magic_name__ = 0.05 , __magic_name__ = 5_0 , ):
pass
def UpperCamelCase__ ( self ):
return KarrasVeSchedulerState.create()
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ = () ):
lowerCamelCase : Dict = jnp.arange(0 , __magic_name__ )[::-1].copy()
lowerCamelCase : int = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in timesteps
]
return state.replace(
num_inference_steps=__magic_name__ , schedule=jnp.array(__magic_name__ , dtype=jnp.float32 ) , timesteps=__magic_name__ , )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , ):
if self.config.s_min <= sigma <= self.config.s_max:
lowerCamelCase : Dict = min(self.config.s_churn / state.num_inference_steps , 2**0.5 - 1 )
else:
lowerCamelCase : Dict = 0
# sample eps ~ N(0, S_noise^2 * I)
lowerCamelCase : List[Any] = random.split(__magic_name__ , num=1 )
lowerCamelCase : Union[str, Any] = self.config.s_noise * random.normal(key=__magic_name__ , shape=sample.shape )
lowerCamelCase : List[Any] = sigma + gamma * sigma
lowerCamelCase : str = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = True , ):
lowerCamelCase : Optional[Any] = sample_hat + sigma_hat * model_output
lowerCamelCase : Dict = (sample_hat - pred_original_sample) / sigma_hat
lowerCamelCase : List[Any] = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=__magic_name__ , derivative=__magic_name__ , state=__magic_name__ )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = True , ):
lowerCamelCase : str = sample_prev + sigma_prev * model_output
lowerCamelCase : str = (sample_prev - pred_original_sample) / sigma_prev
lowerCamelCase : Optional[Any] = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=__magic_name__ , derivative=__magic_name__ , state=__magic_name__ )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
raise NotImplementedError()
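# Illustrative recap of the stochastic step above (comments only): for a sample x
# at noise level sigma(t_i), the scheduler first "churns" noise back in,
#     gamma     = min(s_churn / num_inference_steps, sqrt(2) - 1)
#     sigma_hat = sigma + gamma * sigma
#     x_hat     = x + sqrt(sigma_hat**2 - sigma**2) * eps,  eps ~ N(0, s_noise**2 * I)
# and the step/step-correct methods then integrate from sigma_hat down to the
# previous sigma along the derivative (x_hat - x0_pred) / sigma_hat
# (Algorithm 2 of Karras et al., 2022).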
| 681 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
_lowerCamelCase ={
"""vocab_file""": {"""mobilebert-uncased""": """https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"""},
"""tokenizer_file""": {
"""mobilebert-uncased""": """https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"""
},
}
_lowerCamelCase ={"""mobilebert-uncased""": 5_1_2}
_lowerCamelCase ={}
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : List[str] = VOCAB_FILES_NAMES
_UpperCAmelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase : List[Any] = PRETRAINED_INIT_CONFIGURATION
_UpperCAmelCase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase : List[Any] = MobileBertTokenizer
def __init__( self , __magic_name__=None , __magic_name__=None , __magic_name__=True , __magic_name__="[UNK]" , __magic_name__="[SEP]" , __magic_name__="[PAD]" , __magic_name__="[CLS]" , __magic_name__="[MASK]" , __magic_name__=True , __magic_name__=None , **__magic_name__ , ):
super().__init__(
__magic_name__ , tokenizer_file=__magic_name__ , do_lower_case=__magic_name__ , unk_token=__magic_name__ , sep_token=__magic_name__ , pad_token=__magic_name__ , cls_token=__magic_name__ , mask_token=__magic_name__ , tokenize_chinese_chars=__magic_name__ , strip_accents=__magic_name__ , **__magic_name__ , )
lowerCamelCase : Optional[int] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , __magic_name__ ) != do_lower_case
or normalizer_state.get("""strip_accents""" , __magic_name__ ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , __magic_name__ ) != tokenize_chinese_chars
):
lowerCamelCase : Optional[Any] = getattr(__magic_name__ , normalizer_state.pop("""type""" ) )
lowerCamelCase : Any = do_lower_case
lowerCamelCase : Union[str, Any] = strip_accents
lowerCamelCase : Tuple = tokenize_chinese_chars
lowerCamelCase : Dict = normalizer_class(**__magic_name__ )
lowerCamelCase : Optional[Any] = do_lower_case
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__=None ):
lowerCamelCase : List[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ = None ):
lowerCamelCase : Optional[Any] = [self.sep_token_id]
lowerCamelCase : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ = None ):
lowerCamelCase : Union[str, Any] = self._tokenizer.model.save(__magic_name__ , name=__magic_name__ )
return tuple(__magic_name__ )
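# Illustrative usage (comments only; assumes this class corresponds to
# MobileBertTokenizerFast and that the "mobilebert-uncased" files from the map
# above are reachable via the URL listed there):
#
#   tokenizer = MobileBertTokenizerFast.from_pretrained("google/mobilebert-uncased")
#   encoded = tokenizer("Hello world")
#   # encoded["input_ids"] starts with the [CLS] id and ends with the [SEP] id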
| 681 |
from itertools import product
from cv2 import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros
def _a ( lowerCamelCase, lowerCamelCase ):
lowerCamelCase : List[str] = k_size // 2
lowerCamelCase , lowerCamelCase : Optional[int] = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
lowerCamelCase : Optional[Any] = 1 / (2 * pi * sigma) * exp(-(square(lowerCamelCase ) + square(lowerCamelCase )) / (2 * square(lowerCamelCase )) )
return g
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase ):
lowerCamelCase , lowerCamelCase : Union[str, Any] = image.shape[0], image.shape[1]
# dst image height and width
lowerCamelCase : Dict = height - k_size + 1
lowerCamelCase : str = width - k_size + 1
# im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
lowerCamelCase : Tuple = zeros((dst_height * dst_width, k_size * k_size) )
lowerCamelCase : List[Any] = 0
for i, j in product(range(lowerCamelCase ), range(lowerCamelCase ) ):
lowerCamelCase : Dict = ravel(image[i : i + k_size, j : j + k_size] )
lowerCamelCase : Union[str, Any] = window
row += 1
# turn the kernel into shape(k*k, 1)
lowerCamelCase : Dict = gen_gaussian_kernel(lowerCamelCase, lowerCamelCase )
lowerCamelCase : str = ravel(lowerCamelCase )
# reshape and get the dst image
lowerCamelCase : List[str] = dot(lowerCamelCase, lowerCamelCase ).reshape(lowerCamelCase, lowerCamelCase ).astype(uint8 )
return dst
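# Note added for illustration: gen_gaussian_kernel returns the raw continuous
# density, so the kernel does not sum to 1 and the filtered image is darkened by
# the kernel sum. A common variant normalises it first, e.g.:
#
#   kernel = gen_gaussian_kernel(k_size, sigma)
#   kernel = kernel / kernel.sum()  # weights now sum to 1, preserving brightness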
if __name__ == "__main__":
# read original image
_lowerCamelCase =imread(R"""../image_data/lena.jpg""")
# turn image in gray scale value
_lowerCamelCase =cvtColor(img, COLOR_BGR2GRAY)
# get values with two different mask size
_gaussian_3x3 =gaussian_filter(gray, 3, sigma=1)
_gaussian_5x5 =gaussian_filter(gray, 5, sigma=0.8)
# show result images
imshow("""gaussian filter with 3x3 mask""", _gaussian_3x3)
imshow("""gaussian filter with 5x5 mask""", _gaussian_5x5)
waitKey()
| 681 | 1 |
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
_lowerCamelCase =logging.getLogger(__name__)
_lowerCamelCase =5_0 # max width of layer names
_lowerCamelCase =7_0 # max width of quantizer names
def _a ( lowerCamelCase ):
lowerCamelCase : Optional[int] = parser.add_argument_group("""quant_trainer arguments""" )
group.add_argument("""--wprec""", type=lowerCamelCase, default=8, help="""weight precision""" )
group.add_argument("""--aprec""", type=lowerCamelCase, default=8, help="""activation precision""" )
group.add_argument("""--quant-per-tensor""", action="""store_true""", help="""per tensor weight scaling""" )
group.add_argument("""--quant-disable""", action="""store_true""", help="""disable all quantizers""" )
group.add_argument("""--quant-disable-embeddings""", action="""store_true""", help="""disable all embeddings quantizers""" )
group.add_argument("""--quant-disable-keyword""", type=lowerCamelCase, nargs="""+""", help="""disable quantizers by keyword""" )
group.add_argument("""--quant-disable-layer-module""", type=lowerCamelCase, help="""disable quantizers by keyword under layer.""" )
group.add_argument("""--quant-enable-layer-module""", type=lowerCamelCase, help="""enable quantizers by keyword under layer""" )
group.add_argument("""--calibrator""", default="""max""", help="""which quantization range calibrator to use""" )
group.add_argument("""--percentile""", default=lowerCamelCase, type=lowerCamelCase, help="""percentile for PercentileCalibrator""" )
group.add_argument("""--fuse-qkv""", action="""store_true""", help="""use the same scale factor for qkv""" )
group.add_argument("""--clip-gelu""", metavar="""N""", type=lowerCamelCase, help="""clip gelu output maximum value to N""" )
group.add_argument(
"""--recalibrate-weights""", action="""store_true""", help=(
"""recalibrate weight amaxes by taking the max of the weights."""
""" amaxes will be computed with the current quantization granularity (axis)."""
), )
def _a ( lowerCamelCase ):
if args.calibrator == "max":
lowerCamelCase : Union[str, Any] = """max"""
elif args.calibrator == "percentile":
if args.percentile is None:
raise ValueError("""Specify --percentile when using percentile calibrator""" )
lowerCamelCase : Optional[Any] = """histogram"""
elif args.calibrator == "mse":
lowerCamelCase : Optional[Any] = """histogram"""
else:
raise ValueError(F'''Invalid calibrator {args.calibrator}''' )
lowerCamelCase : Optional[int] = QuantDescriptor(num_bits=args.aprec, calib_method=lowerCamelCase )
lowerCamelCase : List[Any] = QuantDescriptor(num_bits=args.wprec, axis=(None if args.quant_per_tensor else (0,)) )
quant_nn.QuantLinear.set_default_quant_desc_input(lowerCamelCase )
quant_nn.QuantLinear.set_default_quant_desc_weight(lowerCamelCase )
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase=False, lowerCamelCase=False ):
logger.info("""Configuring Model for Quantization""" )
logger.info(F'''using quantization package {pytorch_quantization.__file__}''' )
if not calib:
if args.quant_disable_embeddings:
set_quantizer_by_name(lowerCamelCase, ["""embeddings"""], which="""weight""", _disabled=lowerCamelCase )
if args.quant_disable:
set_quantizer_by_name(lowerCamelCase, [""""""], _disabled=lowerCamelCase )
if args.quant_disable_keyword:
set_quantizer_by_name(lowerCamelCase, args.quant_disable_keyword, _disabled=lowerCamelCase )
if args.quant_disable_layer_module:
set_quantizer_by_name(lowerCamelCase, [R"""layer.\d+.""" + args.quant_disable_layer_module], _disabled=lowerCamelCase )
if args.quant_enable_layer_module:
set_quantizer_by_name(lowerCamelCase, [R"""layer.\d+.""" + args.quant_enable_layer_module], _disabled=lowerCamelCase )
if args.recalibrate_weights:
recalibrate_weights(lowerCamelCase )
if args.fuse_qkv:
fuse_qkv(lowerCamelCase, lowerCamelCase )
if args.clip_gelu:
clip_gelu(lowerCamelCase, args.clip_gelu )
# if args.local_rank in [-1, 0] and not calib:
print_quant_summary(lowerCamelCase )
def _a ( lowerCamelCase ):
logger.info("""Enabling Calibration""" )
for name, module in model.named_modules():
if name.endswith("""_quantizer""" ):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
logger.info(F'''{name:80}: {module}''' )
def _a ( lowerCamelCase, lowerCamelCase ):
logger.info("""Loading calibrated amax""" )
for name, module in model.named_modules():
if name.endswith("""_quantizer""" ):
if module._calibrator is not None:
if isinstance(module._calibrator, calib.MaxCalibrator ):
module.load_calib_amax()
else:
module.load_calib_amax("""percentile""", percentile=args.percentile )
module.enable_quant()
module.disable_calib()
else:
module.enable()
model.cuda()
print_quant_summary(lowerCamelCase )
def _a ( lowerCamelCase, lowerCamelCase ):
def fusea(lowerCamelCase, lowerCamelCase, lowerCamelCase ):
for mod in [qq, qk, qv]:
if not hasattr(lowerCamelCase, """_amax""" ):
print(""" WARNING: NO AMAX BUFFER""" )
return
lowerCamelCase : Optional[int] = qq._amax.detach().item()
lowerCamelCase : Union[str, Any] = qk._amax.detach().item()
lowerCamelCase : Tuple = qv._amax.detach().item()
lowerCamelCase : int = max(lowerCamelCase, lowerCamelCase, lowerCamelCase )
qq._amax.fill_(lowerCamelCase )
qk._amax.fill_(lowerCamelCase )
qv._amax.fill_(lowerCamelCase )
logger.info(F''' q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}''' )
for name, mod in model.named_modules():
if name.endswith(""".attention.self""" ):
logger.info(F'''FUSE_QKV: {name:{name_width}}''' )
fusea(mod.matmul_q_input_quantizer, mod.matmul_k_input_quantizer, mod.matmul_v_input_quantizer )
if args.quant_per_tensor:
fusea(mod.query._weight_quantizer, mod.key._weight_quantizer, mod.value._weight_quantizer )
def _a ( lowerCamelCase, lowerCamelCase ):
for name, mod in model.named_modules():
if name.endswith(""".output.dense""" ) and not name.endswith("""attention.output.dense""" ):
lowerCamelCase : List[Any] = mod._input_quantizer._amax.data.detach().item()
mod._input_quantizer._amax.data.detach().clamp_(max=lowerCamelCase )
lowerCamelCase : List[str] = mod._input_quantizer._amax.data.detach().item()
logger.info(F'''CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}''' )
def _a ( lowerCamelCase ):
for name, mod in model.named_modules():
if hasattr(lowerCamelCase, """_weight_quantizer""" ) and mod._weight_quantizer.axis is not None:
lowerCamelCase : Dict = mod.weight.shape[0]
lowerCamelCase : Tuple = mod._weight_quantizer._amax.detach()
lowerCamelCase : Tuple = torch.ones(lowerCamelCase, dtype=amax.dtype, device=amax.device ) * amax
print(F'''expanding {name} {amax} -> {mod._weight_quantizer._amax}''' )
def _a ( lowerCamelCase ):
for name, mod in model.named_modules():
if hasattr(lowerCamelCase, """_weight_quantizer""" ):
if not hasattr(mod.weight_quantizer, """_amax""" ):
print("""RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER""" )
continue
# determine which axes to reduce across
# e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
lowerCamelCase : List[Any] = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
lowerCamelCase : str = set(range(len(mod.weight.size() ) ) ) - axis_set
lowerCamelCase : Optional[Any] = pytorch_quantization.utils.reduce_amax(mod.weight, axis=lowerCamelCase, keepdims=lowerCamelCase ).detach()
logger.info(F'''RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}''' )
lowerCamelCase : Optional[int] = amax
def _a ( lowerCamelCase, lowerCamelCase=25, lowerCamelCase=180, lowerCamelCase=None ):
if ignore is None:
lowerCamelCase : Union[str, Any] = []
elif not isinstance(lowerCamelCase, lowerCamelCase ):
lowerCamelCase : Dict = [ignore]
lowerCamelCase : Any = 0
for name, mod in model.named_modules():
if not hasattr(lowerCamelCase, """weight""" ):
continue
lowerCamelCase : str = max(lowerCamelCase, len(lowerCamelCase ) )
for name, mod in model.named_modules():
lowerCamelCase : Optional[Any] = getattr(lowerCamelCase, """_input_quantizer""", lowerCamelCase )
lowerCamelCase : Union[str, Any] = getattr(lowerCamelCase, """_weight_quantizer""", lowerCamelCase )
if not hasattr(lowerCamelCase, """weight""" ):
continue
if type(lowerCamelCase ) in ignore:
continue
if [True for s in ignore if type(lowerCamelCase ) is str and s in name]:
continue
lowerCamelCase : str = F'''Act:{input_q.extra_repr()}'''
lowerCamelCase : List[str] = F'''Wgt:{weight_q.extra_repr()}'''
lowerCamelCase : Union[str, Any] = F'''{name:{name_width}} {act_str} {wgt_str}'''
if len(lowerCamelCase ) <= line_width:
logger.info(lowerCamelCase )
else:
logger.info(F'''{name:{name_width}} {act_str}''' )
logger.info(F'''{" ":{name_width}} {wgt_str}''' )
def _a ( lowerCamelCase ):
lowerCamelCase : Optional[Any] = 0
for name, mod in model.named_modules():
if isinstance(lowerCamelCase, pytorch_quantization.nn.TensorQuantizer ):
print(F'''{name:80} {mod}''' )
count += 1
print(F'''{count} TensorQuantizers found in model''' )
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase ):
lowerCamelCase : Dict = getattr(lowerCamelCase, lowerCamelCase, lowerCamelCase )
if quantizer_mod is not None:
assert hasattr(lowerCamelCase, lowerCamelCase )
setattr(lowerCamelCase, lowerCamelCase, lowerCamelCase )
else:
logger.warning(F'''{name} has no {quantizer}''' )
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase="both", **lowerCamelCase ):
lowerCamelCase : int = F'''Warning: changing {which} quantizers of {name:{qname_width}}'''
for k, v in kwargs.items():
s += F''' {k}={v}'''
if which in ["input", "both"]:
set_quantizer(lowerCamelCase, lowerCamelCase, """_input_quantizer""", lowerCamelCase, lowerCamelCase )
if which in ["weight", "both"]:
set_quantizer(lowerCamelCase, lowerCamelCase, """_weight_quantizer""", lowerCamelCase, lowerCamelCase )
logger.info(lowerCamelCase )
def _a ( lowerCamelCase, lowerCamelCase, **lowerCamelCase ):
for name, mod in model.named_modules():
if hasattr(lowerCamelCase, """_input_quantizer""" ) or hasattr(lowerCamelCase, """_weight_quantizer""" ):
for n in names:
if re.search(lowerCamelCase, lowerCamelCase ):
set_quantizers(lowerCamelCase, lowerCamelCase, **lowerCamelCase )
elif name.endswith("""_quantizer""" ):
for n in names:
if re.search(lowerCamelCase, lowerCamelCase ):
lowerCamelCase : str = F'''Warning: changing {name:{name_width}}'''
for k, v in kwargs.items():
s += F''' {k}={v}'''
setattr(lowerCamelCase, lowerCamelCase, lowerCamelCase )
logger.info(lowerCamelCase )
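# Illustrative usage (comments only; `model` stands for a transformer whose
# Linear/matmul inputs were replaced with pytorch_quantization quantizers):
#
#   # disable every quantizer whose name matches "final_input",
#   # mirroring what --quant-disable-keyword does above
#   set_quantizer_by_name(model, ["final_input"], _disabled=True)
#   print_quant_summary(model)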
| 681 |
import pytest
_lowerCamelCase ="""__dummy_dataset1__"""
_lowerCamelCase ="""
import json
import os
import datasets
REPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"
URLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}
class __DummyDataset1__(datasets.GeneratorBasedBuilder):
def _info(self):
features = datasets.Features(
{
\"tokens\": datasets.Sequence(datasets.Value(\"string\")),
\"ner_tags\": datasets.Sequence(
datasets.features.ClassLabel(
names=[
\"O\",
\"B-PER\",
\"I-PER\",
\"B-ORG\",
\"I-ORG\",
\"B-LOC\",
\"I-LOC\",
]
)
),
\"langs\": datasets.Sequence(datasets.Value(\"string\")),
\"spans\": datasets.Sequence(datasets.Value(\"string\")),
}
)
return datasets.DatasetInfo(features=features)
def _split_generators(self, dl_manager):
dl_path = dl_manager.download(URLS)
return [
datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),
datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),
]
def _generate_examples(self, filepath):
with open(filepath, \"r\", encoding=\"utf-8\") as f:
for i, line in enumerate(f):
yield i, json.loads(line)
"""
@pytest.fixture
def _a ( ):
return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def _a ( ):
return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase ):
lowerCamelCase : Union[str, Any] = dataset_loading_script_name
lowerCamelCase : Dict = tmp_path / """datasets""" / script_name
script_dir.mkdir(parents=lowerCamelCase )
lowerCamelCase : str = script_dir / F'''{script_name}.py'''
with open(lowerCamelCase, """w""" ) as f:
f.write(lowerCamelCase )
return str(lowerCamelCase )
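# Illustrative consumer of the fixture above (a hypothetical test, not part of
# this file; the fixture name `dataset_loading_script_dir` is assumed):
#
#   def test_dummy_dataset_script_is_written(dataset_loading_script_dir):
#       import os
#       assert os.path.exists(dataset_loading_script_dir)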
| 681 | 1 |
_lowerCamelCase =tuple[float, float, float]
_lowerCamelCase =tuple[float, float, float]
def _a ( lowerCamelCase, lowerCamelCase ):
lowerCamelCase : str = end_pointa[0] - end_pointa[0]
lowerCamelCase : Any = end_pointa[1] - end_pointa[1]
lowerCamelCase : Optional[Any] = end_pointa[2] - end_pointa[2]
return (x, y, z)
def _a ( lowerCamelCase, lowerCamelCase ):
lowerCamelCase : Dict = ab[1] * ac[2] - ab[2] * ac[1] # *i
lowerCamelCase : Optional[Any] = (ab[0] * ac[2] - ab[2] * ac[0]) * -1 # *j
lowerCamelCase : Dict = ab[0] * ac[1] - ab[1] * ac[0] # *k
return (x, y, z)
def _a ( lowerCamelCase, lowerCamelCase ):
return tuple(round(lowerCamelCase, lowerCamelCase ) for x in vector ) == (0, 0, 0)
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase = 10 ):
lowerCamelCase : str = create_vector(lowerCamelCase, lowerCamelCase )
lowerCamelCase : str = create_vector(lowerCamelCase, lowerCamelCase )
return is_zero_vector(get_ad_vectors_cross(lowerCamelCase, lowerCamelCase ), lowerCamelCase )
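# Illustrative check (comments only): the test above asks whether the cross
# product of vectors AB and AC is (approximately) the zero vector, which holds
# exactly when the three points are collinear. For points on the x-axis:
#
#   a, b, c = (0.0, 0.0, 0.0), (1.0, 0.0, 0.0), (2.0, 0.0, 0.0)
#   # AB = (1, 0, 0) and AC = (2, 0, 0) are parallel, so AB x AC == (0, 0, 0)
#   # and is_zero_vector(get_ad_vectors_cross(AB, AC), accuracy) is True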
| 681 |
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("""9.1.0"""):
_lowerCamelCase ={
"""linear""": PIL.Image.Resampling.BILINEAR,
"""bilinear""": PIL.Image.Resampling.BILINEAR,
"""bicubic""": PIL.Image.Resampling.BICUBIC,
"""lanczos""": PIL.Image.Resampling.LANCZOS,
"""nearest""": PIL.Image.Resampling.NEAREST,
}
else:
_lowerCamelCase ={
"""linear""": PIL.Image.LINEAR,
"""bilinear""": PIL.Image.BILINEAR,
"""bicubic""": PIL.Image.BICUBIC,
"""lanczos""": PIL.Image.LANCZOS,
"""nearest""": PIL.Image.NEAREST,
}
def _a ( lowerCamelCase ):
lowerCamelCase : Optional[Any] = (images / 2 + 0.5).clamp(0, 1 )
lowerCamelCase : Optional[Any] = images.cpu().permute(0, 2, 3, 1 ).float().numpy()
lowerCamelCase : Any = numpy_to_pil(lowerCamelCase )
return images
def _a ( lowerCamelCase ):
if images.ndim == 3:
lowerCamelCase : Optional[Any] = images[None, ...]
lowerCamelCase : List[Any] = (images * 255).round().astype("""uint8""" )
if images.shape[-1] == 1:
# special case for grayscale (single channel) images
lowerCamelCase : Optional[int] = [Image.fromarray(image.squeeze(), mode="""L""" ) for image in images]
else:
lowerCamelCase : int = [Image.fromarray(lowerCamelCase ) for image in images]
return pil_images
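# Illustrative usage (added; not part of the module): a float batch of shape
# (N, H, W, C) with values in [0, 1] converts into a list of PIL images.
#
#   import numpy as np
#   batch = np.random.rand(2, 8, 8, 3).astype("float32")
#   pil_images = numpy_to_pil(batch)  # list of two 8x8 RGB PIL.Image objects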
| 681 | 1 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
fast27_timesteps,
smart27_timesteps,
smart50_timesteps,
smart100_timesteps,
smart185_timesteps,
super27_timesteps,
super40_timesteps,
super100_timesteps,
)
@dataclass
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Union[List[PIL.Image.Image], np.ndarray]
_UpperCAmelCase : Optional[List[bool]]
_UpperCAmelCase : Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
from .pipeline_if_img2img import IFImg2ImgPipeline
from .pipeline_if_img2img_superresolution import IFImg2ImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
| 681 |
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class A__ ( nn.Module):
def __init__( self , __magic_name__ = 1_6 , __magic_name__ = 8_8 , __magic_name__ = None , __magic_name__ = 1 , __magic_name__ = 0.0 , __magic_name__ = 3_2 , __magic_name__ = None , __magic_name__ = False , __magic_name__ = None , __magic_name__ = None , __magic_name__ = "geglu" , __magic_name__ = None , ):
super().__init__()
lowerCamelCase : Any = nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=__magic_name__ , attention_head_dim=__magic_name__ , in_channels=__magic_name__ , num_layers=__magic_name__ , dropout=__magic_name__ , norm_num_groups=__magic_name__ , cross_attention_dim=__magic_name__ , attention_bias=__magic_name__ , sample_size=__magic_name__ , num_vector_embeds=__magic_name__ , activation_fn=__magic_name__ , num_embeds_ada_norm=__magic_name__ , )
for _ in range(2 )
] )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
lowerCamelCase : Any = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
lowerCamelCase : List[Any] = [7_7, 2_5_7]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
lowerCamelCase : Optional[int] = [1, 0]
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__ = True , ):
lowerCamelCase : List[Any] = hidden_states
lowerCamelCase : Dict = []
lowerCamelCase : List[Any] = 0
# attention_mask is not used yet
for i in range(2 ):
# for each of the two transformers, pass the corresponding condition tokens
lowerCamelCase : Dict = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
lowerCamelCase : Optional[int] = self.transformer_index_for_condition[i]
lowerCamelCase : List[Any] = self.transformers[transformer_index](
__magic_name__ , encoder_hidden_states=__magic_name__ , timestep=__magic_name__ , cross_attention_kwargs=__magic_name__ , return_dict=__magic_name__ , )[0]
encoded_states.append(encoded_state - input_states )
tokens_start += self.condition_lengths[i]
lowerCamelCase : Any = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
lowerCamelCase : Dict = output_states + input_states
if not return_dict:
return (output_states,)
return TransformeraDModelOutput(sample=__magic_name__ )
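# Illustrative recap of the mixing rule above (comments only): with encoded
# residuals e0, e1 from the two transformers and mix_ratio r,
#     output_states = (e0 * r + e1 * (1 - r)) + input_states
# so the default r = 0.5 averages both transformers' contributions before the
# residual addition.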
| 681 | 1 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A__ :
def __init__( self , __magic_name__ , __magic_name__=3 , __magic_name__=3_2 , __magic_name__=3 , __magic_name__=1_0 , __magic_name__=[1_0, 2_0, 3_0, 4_0] , __magic_name__=[1, 1, 2, 1] , __magic_name__=True , __magic_name__=True , __magic_name__="relu" , __magic_name__=3 , __magic_name__=None , ):
lowerCamelCase : Tuple = parent
lowerCamelCase : Tuple = batch_size
lowerCamelCase : List[Any] = image_size
lowerCamelCase : Optional[Any] = num_channels
lowerCamelCase : Dict = embeddings_size
lowerCamelCase : Optional[int] = hidden_sizes
lowerCamelCase : Union[str, Any] = depths
lowerCamelCase : Optional[Any] = is_training
lowerCamelCase : Union[str, Any] = use_labels
lowerCamelCase : Dict = hidden_act
lowerCamelCase : Any = num_labels
lowerCamelCase : int = scope
lowerCamelCase : Optional[Any] = len(__magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase : Tuple = None
if self.use_labels:
lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels )
lowerCamelCase : Tuple = self.get_config()
return config, pixel_values, labels
def UpperCamelCase__ ( self ):
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ ):
lowerCamelCase : Dict = TFResNetModel(config=__magic_name__ )
lowerCamelCase : Tuple = model(__magic_name__ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ ):
lowerCamelCase : str = self.num_labels
lowerCamelCase : Dict = TFResNetForImageClassification(__magic_name__ )
lowerCamelCase : Union[str, Any] = model(__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[int] = self.prepare_config_and_inputs()
lowerCamelCase , lowerCamelCase , lowerCamelCase : Union[str, Any] = config_and_inputs
lowerCamelCase : List[str] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class A__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase):
_UpperCAmelCase : Any = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
_UpperCAmelCase : List[str] = (
{"""feature-extraction""": TFResNetModel, """image-classification""": TFResNetForImageClassification}
if is_tf_available()
else {}
)
_UpperCAmelCase : Optional[Any] = False
_UpperCAmelCase : Optional[Any] = False
_UpperCAmelCase : Dict = False
_UpperCAmelCase : List[Any] = False
_UpperCAmelCase : Any = False
def UpperCamelCase__ ( self ):
lowerCamelCase : int = TFResNetModelTester(self )
lowerCamelCase : str = ConfigTester(self , config_class=__magic_name__ , has_text_modality=__magic_name__ )
def UpperCamelCase__ ( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase__ ( self ):
return
    @unittest.skip(reason="ResNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ResNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
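# Helper for the integration test below; the fixture is the standard COCO two-cats image used
# across the transformers test suite.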
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFResNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # forward pass
        outputs = model(**inputs)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
| 681 |
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = "▁"
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class BertGenerationTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertGenerationTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 1002)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
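    # The fixture sentencepiece model has a 1,000-piece vocabulary; added special tokens bring
    # get_vocab() to the 1,002 entries pinned down by the assertion above.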
    def test_full_tokenizer(self):
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4], )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
    @cached_property
    def big_tokenizer(self):
        return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [18536, 2260, 101]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
            871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400,
            5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405,
            34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172,
        ]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BertGenerationConfig, BertGenerationEncoder

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False)
        config = BertGenerationConfig()
        model = BertGenerationEncoder(config)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
    @slow
    def test_tokenizer_integration(self):
# fmt: off
lowerCamelCase : Any = {"""input_ids""": [[3_9_2_8_6, 4_5_8, 3_6_3_3_5, 2_0_0_1, 4_5_6, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 7_7_4_6, 1_7_4_1, 1_1_1_5_7, 3_9_1, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 3_9_6_7, 3_5_4_1_2, 1_1_3, 4_9_3_6, 1_0_9, 3_8_7_0, 2_3_7_7, 1_1_3, 3_0_0_8_4, 4_5_7_2_0, 4_5_8, 1_3_4, 1_7_4_9_6, 1_1_2, 5_0_3, 1_1_6_7_2, 1_1_3, 1_1_8, 1_1_2, 5_6_6_5, 1_3_3_4_7, 3_8_6_8_7, 1_1_2, 1_4_9_6, 3_1_3_8_9, 1_1_2, 3_2_6_8, 4_7_2_6_4, 1_3_4, 9_6_2, 1_1_2, 1_6_3_7_7, 8_0_3_5, 2_3_1_3_0, 4_3_0, 1_2_1_6_9, 1_5_5_1_8, 2_8_5_9_2, 4_5_8, 1_4_6, 4_1_6_9_7, 1_0_9, 3_9_1, 1_2_1_6_9, 1_5_5_1_8, 1_6_6_8_9, 4_5_8, 1_4_6, 4_1_3_5_8, 1_0_9, 4_5_2, 7_2_6, 4_0_3_4, 1_1_1, 7_6_3, 3_5_4_1_2, 5_0_8_2, 3_8_8, 1_9_0_3, 1_1_1, 9_0_5_1, 3_9_1, 2_8_7_0, 4_8_9_1_8, 1_9_0_0, 1_1_2_3, 5_5_0, 9_9_8, 1_1_2, 9_5_8_6, 1_5_9_8_5, 4_5_5, 3_9_1, 4_1_0, 2_2_9_5_5, 3_7_6_3_6, 1_1_4], [4_4_8, 1_7_4_9_6, 4_1_9, 3_6_6_3, 3_8_5, 7_6_3, 1_1_3, 2_7_5_3_3, 2_8_7_0, 3_2_8_3, 1_3_0_4_3, 1_6_3_9, 2_4_7_1_3, 5_2_3, 6_5_6, 2_4_0_1_3, 1_8_5_5_0, 2_5_2_1, 5_1_7, 2_7_0_1_4, 2_1_2_4_4, 4_2_0, 1_2_1_2, 1_4_6_5, 3_9_1, 9_2_7, 4_8_3_3, 3_8_8, 5_7_8, 1_1_7_8_6, 1_1_4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_8_4, 2_1_6_9, 7_6_8_7, 2_1_9_3_2, 1_8_1_4_6, 7_2_6, 3_6_3, 1_7_0_3_2, 3_3_9_1, 1_1_4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=lowerCamelCase,  # the expected-encoding literal kept verbatim above
            model_name="google/bert_for_seq_generation_L-24_bbc_encoder", revision="c817d1fd1be2ffa69431227a1fe320544943d4db", )
| 681 | 1 |
from __future__ import annotations


def allocation_num(number_of_bytes: int, partitions: int) -> list[str]:
    if partitions <= 0:
        raise ValueError("partitions must be a positive number!")
    if partitions > number_of_bytes:
        raise ValueError("partitions can not be greater than number_of_bytes!")
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f"{start_bytes}-{end_bytes}")
    return allocation_list
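# Illustrative sanity check of the allocation logic (values computed from the code above):
#   allocation_num(16647, 4) -> ['1-4161', '4162-8322', '8323-12483', '12484-16647']
# The last partition absorbs the remainder so the final range always ends at number_of_bytes.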
if __name__ == "__main__":
import doctest
doctest.testmod()
| 681 |
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()

# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)

# Config: "scale_attn_by_inverse_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    "vocab_size": len(tokenizer),
    "scale_attn_by_inverse_layer_idx": True,
    "reorder_and_upcast_attn": True,
}

# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)

# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
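# Hypothetical invocation (script name and argument values are illustrative; the flags mirror
# the InitializationArguments fields referenced above):
#   python initialize_model.py --config_name gpt2-large --tokenizer_name codeparrot/codeparrot --model_name codeparrot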
| 681 | 1 |
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None

    def __repr__(self) -> str:
        return f"Node({self.data})"
class LinkedList:
    def __init__(self):
        self.head = None

    def __iter__(self) -> Any:
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join([str(item) for item in self])
    def __getitem__(self, index: int) -> Any:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index: int, data: Any) -> None:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data
    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
    def print_list(self) -> None:  # print every node data
        print(self)

    def delete_head(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data
    def is_empty(self) -> bool:
        return self.head is None

    def reverse(self) -> None:
        prev = None
        current = self.head

        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
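    # Note: __len__ and __getitem__ walk the list from the head, so they are O(n);
    # indexed access inside a loop (as in the tests below) is O(n^2).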
def test_singly_linked_list() -> None:
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)

    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))
def test_singly_linked_list_2() -> None:
    test_input = [
        -9,
        100,
        Node(77345112),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()
    for i in test_input:
        linked_list.insert_tail(i)
    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main():
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")
if __name__ == "__main__":
main()
| 681 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
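# Every test below benchmarks a deliberately tiny checkpoint with a single batch size and
# sequence length, so the suite exercises the benchmark plumbing rather than real models.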
@require_tf
class TFBenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], eager_mode=True, multi_process=False, )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, only_pretrain_model=True, )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_inference_no_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_inference_with_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], eager_mode=True, multi_process=False, )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_inference_with_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False, )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)
    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False, )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)
    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "patrickvonplaten/t5-tiny-random"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, )
        benchmark = TensorFlowBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("GPU")) == 0, "Cannot do xla on CPU.")
    def test_inference_no_configs_xla(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], use_xla=True, multi_process=False, )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID], inference=True, save_to_csv=True, sequence_lengths=[8], batch_sizes=[1], inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"), inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"), env_info_csv_file=os.path.join(tmp_dir, "env.csv"), multi_process=False, )
            benchmark = TensorFlowBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())
    def test_trace_memory(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID], inference=True, sequence_lengths=[8], batch_sizes=[1], log_filename=os.path.join(tmp_dir, "log.txt"), log_print=True, trace_memory_line_by_line=True, eager_mode=True, multi_process=False, )
            benchmark = TensorFlowBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
| 681 | 1 |
import gc
import random
import unittest
import torch
from diffusers import (
    IFImg2ImgPipeline,
    IFImg2ImgSuperResolutionPipeline,
    IFInpaintingPipeline,
    IFInpaintingSuperResolutionPipeline,
    IFPipeline,
    IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class IFPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFPipeline
    params = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
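    # get_dummy_components / get_dummy_inputs are the two hooks the tester mixins expect;
    # the methods below mostly just tune tolerances for the shared mixin tests.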
    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2, )

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@slow
@require_torch_gpu
class IFPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
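    # The single test below runs all six IF pipelines (text-to-image, img2img, inpainting,
    # each followed by its stage-II super-resolution) against the released checkpoints; the
    # max_memory_allocated assertions guard the fp16 + cpu-offload memory budget.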
    def test_all(self):
        # if
        pipe_1 = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
        pipe_2 = IFSuperResolutionPipeline.from_pretrained(
            "DeepFloyd/IF-II-L-v1.0", variant="fp16", torch_dtype=torch.float16, text_encoder=None, tokenizer=None)

        # pre compute text embeddings and remove T5 to save memory
        pipe_1.text_encoder.to("cuda")
        prompt_embeds, negative_prompt_embeds = pipe_1.encode_prompt("anime turtle", device="cuda")
        del pipe_1.tokenizer
        del pipe_1.text_encoder
        gc.collect()
        pipe_1.tokenizer = None
        pipe_1.text_encoder = None

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # img2img
        pipe_1 = IFImg2ImgPipeline(**pipe_1.components)
        pipe_2 = IFImg2ImgSuperResolutionPipeline(**pipe_2.components)
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if_img2img(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # inpainting
        pipe_1 = IFInpaintingPipeline(**pipe_1.components)
        pipe_2 = IFInpaintingSuperResolutionPipeline(**pipe_2.components)
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if_inpainting(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
    def _test_if(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, num_inference_steps=2, generator=generator, output_type="np", )
        image = output.images[0]
        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 13 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy")
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, generator=generator, num_inference_steps=2, output_type="np", )
        image = output.images[0]
        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy")
        assert_mean_pixel_difference(image, expected_image)
    def _test_if_img2img(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, num_inference_steps=2, generator=generator, output_type="np", )
        image = output.images[0]
        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy")
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu").manual_seed(0)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, original_image=original_image, generator=generator, num_inference_steps=2, output_type="np", )
        image = output.images[0]
        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy")
        assert_mean_pixel_difference(image, expected_image)
    def _test_if_inpainting(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 64, 64), rng=random.Random(1)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, mask_image=mask_image, num_inference_steps=2, generator=generator, output_type="np", )
        image = output.images[0]
        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy")
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 256, 256), rng=random.Random(1)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, mask_image=mask_image, original_image=original_image, generator=generator, num_inference_steps=2, output_type="np", )
        image = output.images[0]
        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy")
        assert_mean_pixel_difference(image, expected_image)
def _start_torch_memory_measurement():
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
| 681 |
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two(x):
    return x + 2
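# add_two serves as the only "tool" handed to the sandboxed interpreter in the tests below;
# as the CaptureStdout test shows, evaluate() refuses calls to anything not passed in explicitly.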
class PythonInterpreterTester(unittest.TestCase):
    def test_evaluate_assign(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

        code = "x = y"
        state = {"y": 5}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 5, "y": 5})
    def test_evaluate_call(self):
        code = "y = add_two(x)"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code, {}, state=state)
        assert result is None
        assert "tried to execute add_two" in out.out
    def test_evaluate_constant(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

    def test_evaluate_dict(self):
        code = "test_dict = {'x': x, 'y': add_two(x)}"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertDictEqual(result, {"x": 3, "y": 5})
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_expression(self):
        code = "x = 3\ny = 5"
        state = {}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

    def test_evaluate_f_string(self):
        code = "text = f'This is x: {x}.'"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state, {"x": 3, "text": "This is x: 3."})

    def test_evaluate_if(self):
        code = "if x <= 3:\n y = 2\nelse:\n y = 5"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state, {"x": 3, "y": 2})

        state = {"x": 8}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 8, "y": 5})

    def test_evaluate_list(self):
        code = "test_list = [x, add_two(x)]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertListEqual(result, [3, 5])
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

    def test_evaluate_name(self):
        code = "y = x"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3, "y": 3})

    def test_evaluate_subscript(self):
        code = "test_list = [x, add_two(x)]\ntest_list[1]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

        code = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_for(self):
        code = "x = 0\nfor i in range(3):\n x = i"
        state = {}
        result = evaluate(code, {"range": range}, state=state)
        assert result == 2
        self.assertDictEqual(state, {"x": 2, "i": 2})
| 681 | 1 |
import json
import os
import unittest
from transformers import AutoTokenizer, GPT2Tokenizer, GPT2TokenizerFast
from transformers.models.gpt2.tokenization_gpt2 import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPT2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPT2Tokenizer
    rust_tokenizer_class = GPT2TokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
"""<|endoftext|>""",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
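        # Note: the merges file lists BPE merge rules one per line (after the "#version" header),
        # while the vocab file maps each token to its integer id.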
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPT2Tokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPT2TokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = GPT2Tokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)

        sequence = "lower newer"

        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_pretokenized_inputs(self, *args, **kwargs):
        # It's very difficult to mix/test pretokenization with byte-level
        # And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
        pass
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length", )
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length", )
    def test_padding_if_pad_token_set_slow(self):
        tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")

        # Simple input
        s = "This is a simple input"
        s2 = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]

        pad_token_id = tokenizer.pad_token_id

        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_s2 = tokenizer(s2, padding=True, truncate=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_p2 = tokenizer(p2, padding=True, truncate=True, return_tensors="np")
        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s["input_ids"])
        self.assertTrue(0 in out_s["attention_mask"])

        # s2
        # test automatic padding
        self.assertEqual(out_s2["input_ids"].shape[-1], 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2["input_ids"][0])
        self.assertFalse(0 in out_s2["attention_mask"][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2["input_ids"][1])
        self.assertTrue(0 in out_s2["attention_mask"][1])

        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p["input_ids"])
        self.assertTrue(0 in out_p["attention_mask"])

        # p2
        # test automatic padding pair
        self.assertEqual(out_p2["input_ids"].shape[-1], 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2["input_ids"][0])
        self.assertFalse(0 in out_p2["attention_mask"][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2["input_ids"][1])
        self.assertTrue(0 in out_p2["attention_mask"][1])
    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)

        s = "This is a simple input"
        s2 = ["This is a simple input 1", "This is a simple input 2"]

        bos_token_id = tokenizer.bos_token_id

        out_s = tokenizer(s)
        out_s2 = tokenizer(s2)

        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids))

        decode_s = tokenizer.decode(out_s.input_ids)
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids)

        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_s2))
    def test_padding_different_model_input_name(self):
        pass

    def test_special_tokens_mask_input_pairs_and_bos_token(self):
        # TODO: change to self.get_tokenizers() when the fast version is implemented
        tokenizers = [self.get_tokenizer(do_lower_case=False, add_bos_token=True)]
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                sequence_0 = "Encode this."
                sequence_1 = "This one too please."
                encoded_sequence = tokenizer.encode(sequence_0, add_special_tokens=False)
                encoded_sequence += tokenizer.encode(sequence_1, add_special_tokens=False)
                encoded_sequence_dict = tokenizer.encode_plus(
                    sequence_0, sequence_1, add_special_tokens=True, return_special_tokens_mask=True, )
                encoded_sequence_w_special = encoded_sequence_dict["input_ids"]
                special_tokens_mask = encoded_sequence_dict["special_tokens_mask"]
                self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special))

                filtered_sequence = [
                    (x if not special_tokens_mask[i] else None) for i, x in enumerate(encoded_sequence_w_special)
                ]
                filtered_sequence = [x for x in filtered_sequence if x is not None]
                self.assertEqual(encoded_sequence, filtered_sequence)
@require_tokenizers
class OPTTokenizationTest(unittest.TestCase):
    def test_serialize_deserialize_fast_opt(self):
        # More context:
        # https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
        # https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
        # https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True)
        text = "A photo of a cat"
        ids = tokenizer.encode(
            text, )
        self.assertEqual(ids, [2, 250, 1345, 9, 10, 4758])

        tokenizer.save_pretrained("test_opt")
        tokenizer = AutoTokenizer.from_pretrained("./test_opt")
        ids = tokenizer.encode(
            text, )
        self.assertEqual(ids, [2, 250, 1345, 9, 10, 4758])
    def test_fast_slow_equivalence(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", use_slow=True)
        text = "A photo of a cat"
        ids = tokenizer.encode(
            text, )
        # Same as above
        self.assertEqual(ids, [2, 250, 1345, 9, 10, 4758])
    @unittest.skip("This test is failing because of a bug in the fast tokenizer")
    def test_users_can_modify_bos(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True)
        tokenizer.bos_token = "bos"
        tokenizer.bos_token_id = tokenizer.get_vocab()["bos"]
        text = "A photo of a cat"
        ids = tokenizer.encode(
            text, )
        # We changed the bos token
        self.assertEqual(ids, [31957, 250, 1345, 9, 10, 4758])
        tokenizer.save_pretrained("./tok")
        tokenizer = AutoTokenizer.from_pretrained("./tok")
        self.assertTrue(tokenizer.is_fast)
        ids = tokenizer.encode(
            text, )
        self.assertEqual(ids, [31957, 250, 1345, 9, 10, 4758])
| 681 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"""edbeeching/decision-transformer-gym-hopper-medium""": (
"""https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"""
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Optional[int] = """decision_transformer"""
_UpperCAmelCase : str = ["""past_key_values"""]
_UpperCAmelCase : Any = {
"""max_position_embeddings""": """n_positions""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self , __magic_name__=1_7 , __magic_name__=4 , __magic_name__=1_2_8 , __magic_name__=4_0_9_6 , __magic_name__=True , __magic_name__=1 , __magic_name__=1_0_2_4 , __magic_name__=3 , __magic_name__=1 , __magic_name__=None , __magic_name__="relu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=1e-5 , __magic_name__=0.02 , __magic_name__=True , __magic_name__=True , __magic_name__=5_0_2_5_6 , __magic_name__=5_0_2_5_6 , __magic_name__=False , __magic_name__=False , **__magic_name__ , ):
lowerCamelCase : Optional[int] = state_dim
lowerCamelCase : int = act_dim
lowerCamelCase : int = hidden_size
lowerCamelCase : Union[str, Any] = max_ep_len
lowerCamelCase : Optional[int] = action_tanh
lowerCamelCase : Any = vocab_size
lowerCamelCase : List[str] = n_positions
lowerCamelCase : List[Any] = n_layer
lowerCamelCase : Dict = n_head
lowerCamelCase : Optional[Any] = n_inner
lowerCamelCase : Tuple = activation_function
lowerCamelCase : Tuple = resid_pdrop
lowerCamelCase : str = embd_pdrop
lowerCamelCase : Dict = attn_pdrop
lowerCamelCase : Tuple = layer_norm_epsilon
lowerCamelCase : Tuple = initializer_range
lowerCamelCase : Tuple = scale_attn_weights
lowerCamelCase : str = use_cache
lowerCamelCase : List[Any] = scale_attn_by_inverse_layer_idx
lowerCamelCase : List[str] = reorder_and_upcast_attn
lowerCamelCase : Optional[Any] = bos_token_id
lowerCamelCase : str = eos_token_id
super().__init__(bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ )
| 681 | 1 |
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class A__ :
def __init__( self , __magic_name__ , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__="resnet50" , __magic_name__=3 , __magic_name__=3_2 , __magic_name__=3 , __magic_name__=True , __magic_name__=True , ):
lowerCamelCase : Optional[Any] = parent
lowerCamelCase : Tuple = out_indices if out_indices is not None else [4]
lowerCamelCase : int = stage_names
lowerCamelCase : Dict = out_features
lowerCamelCase : Any = backbone
lowerCamelCase : Union[str, Any] = batch_size
lowerCamelCase : List[str] = image_size
lowerCamelCase : List[str] = num_channels
lowerCamelCase : int = use_pretrained_backbone
lowerCamelCase : int = is_training
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase : List[str] = self.get_config()
return config, pixel_values
def UpperCamelCase__ ( self ):
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ ):
lowerCamelCase : Any = TimmBackbone(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
with torch.no_grad():
lowerCamelCase : Tuple = model(__magic_name__ )
self.parent.assertEqual(
result.feature_maps[-1].shape , (self.batch_size, model.channels[-1], 1_4, 1_4) , )
def UpperCamelCase__ ( self ):
lowerCamelCase : Union[str, Any] = self.prepare_config_and_inputs()
lowerCamelCase , lowerCamelCase : int = config_and_inputs
lowerCamelCase : Tuple = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class A__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase):
_UpperCAmelCase : List[str] = (TimmBackbone,) if is_torch_available() else ()
_UpperCAmelCase : Dict = {"""feature-extraction""": TimmBackbone} if is_torch_available() else {}
_UpperCAmelCase : Optional[Any] = False
_UpperCAmelCase : Optional[Any] = False
_UpperCAmelCase : Tuple = False
_UpperCAmelCase : int = False
def UpperCamelCase__ ( self ):
lowerCamelCase : Any = TimmBackboneModelTester(self )
lowerCamelCase : Tuple = ConfigTester(self , config_class=__magic_name__ , has_text_modality=__magic_name__ )
def UpperCamelCase__ ( self ):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase__ ( self ):
lowerCamelCase : str = """resnet18"""
lowerCamelCase : List[Any] = """microsoft/resnet-18"""
lowerCamelCase : Dict = AutoBackbone.from_pretrained(__magic_name__ , use_timm_backbone=__magic_name__ )
lowerCamelCase : str = AutoBackbone.from_pretrained(__magic_name__ )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
lowerCamelCase : Dict = AutoBackbone.from_pretrained(__magic_name__ , use_timm_backbone=__magic_name__ , out_indices=[1, 2, 3] )
lowerCamelCase : Union[str, Any] = AutoBackbone.from_pretrained(__magic_name__ , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip("""TimmBackbone doesn't support feed forward chunking""" )
def UpperCamelCase__ ( self ):
pass
@unittest.skip("""TimmBackbone doesn't have num_hidden_layers attribute""" )
def UpperCamelCase__ ( self ):
pass
@unittest.skip("""TimmBackbone initialization is managed on the timm side""" )
def UpperCamelCase__ ( self ):
pass
@unittest.skip("""TimmBackbone models doesn't have inputs_embeds""" )
def UpperCamelCase__ ( self ):
pass
@unittest.skip("""TimmBackbone models doesn't have inputs_embeds""" )
def UpperCamelCase__ ( self ):
pass
@unittest.skip("""TimmBackbone model cannot be created without specifying a backbone checkpoint""" )
def UpperCamelCase__ ( self ):
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def UpperCamelCase__ ( self ):
pass
@unittest.skip("""model weights aren't tied in TimmBackbone.""" )
def UpperCamelCase__ ( self ):
pass
@unittest.skip("""model weights aren't tied in TimmBackbone.""" )
def UpperCamelCase__ ( self ):
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def UpperCamelCase__ ( self ):
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def UpperCamelCase__ ( self ):
pass
@unittest.skip("""TimmBackbone doesn't have hidden size info in its configuration.""" )
def UpperCamelCase__ ( self ):
pass
@unittest.skip("""TimmBackbone doesn't support output_attentions.""" )
def UpperCamelCase__ ( self ):
pass
@unittest.skip("""Safetensors is not supported by timm.""" )
def UpperCamelCase__ ( self ):
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def UpperCamelCase__ ( self ):
pass
def UpperCamelCase__ ( self ):
lowerCamelCase , lowerCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase : int = model_class(__magic_name__ )
lowerCamelCase : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase : List[Any] = [*signature.parameters.keys()]
lowerCamelCase : int = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase , lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase : Optional[Any] = True
lowerCamelCase : List[Any] = self.has_attentions
# no need to test all models as different heads yield the same functionality
lowerCamelCase : str = self.all_model_classes[0]
lowerCamelCase : int = model_class(__magic_name__ )
model.to(__magic_name__ )
lowerCamelCase : List[str] = self._prepare_for_class(__magic_name__ , __magic_name__ )
lowerCamelCase : Optional[int] = model(**__magic_name__ )
lowerCamelCase : str = outputs[0][-1]
# Encoder-/Decoder-only models
lowerCamelCase : str = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
lowerCamelCase : List[Any] = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=__magic_name__ )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def UpperCamelCase__ ( self ):
lowerCamelCase , lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase : Optional[int] = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
lowerCamelCase : int = model(**__magic_name__ )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
lowerCamelCase : int = copy.deepcopy(__magic_name__ )
lowerCamelCase : str = None
lowerCamelCase : Any = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
lowerCamelCase : Tuple = model(**__magic_name__ )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
lowerCamelCase : Union[str, Any] = copy.deepcopy(__magic_name__ )
lowerCamelCase : str = False
lowerCamelCase : Any = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
lowerCamelCase : Union[str, Any] = model(**__magic_name__ )
| 681 |
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
_lowerCamelCase =logging.get_logger(__name__)
class A__ :
def __init__( self , __magic_name__ , __magic_name__ ):
lowerCamelCase : Any = question_encoder
lowerCamelCase : Dict = generator
lowerCamelCase : Tuple = self.question_encoder
def UpperCamelCase__ ( self , __magic_name__ ):
if os.path.isfile(__magic_name__ ):
raise ValueError(F'''Provided path ({save_directory}) should be a directory, not a file''' )
os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
lowerCamelCase : Any = os.path.join(__magic_name__ , """question_encoder_tokenizer""" )
lowerCamelCase : str = os.path.join(__magic_name__ , """generator_tokenizer""" )
self.question_encoder.save_pretrained(__magic_name__ )
self.generator.save_pretrained(__magic_name__ )
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ , **__magic_name__ ):
# dynamically import AutoTokenizer
from ..auto.tokenization_auto import AutoTokenizer
lowerCamelCase : Any = kwargs.pop("""config""" , __magic_name__ )
if config is None:
lowerCamelCase : Tuple = RagConfig.from_pretrained(__magic_name__ )
lowerCamelCase : Optional[Any] = AutoTokenizer.from_pretrained(
__magic_name__ , config=config.question_encoder , subfolder="""question_encoder_tokenizer""" )
lowerCamelCase : Any = AutoTokenizer.from_pretrained(
__magic_name__ , config=config.generator , subfolder="""generator_tokenizer""" )
return cls(question_encoder=__magic_name__ , generator=__magic_name__ )
def __call__( self , *__magic_name__ , **__magic_name__ ):
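# delegate to whichever sub-tokenizer is currently active (the question encoder by default)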
return self.current_tokenizer(*__magic_name__ , **__magic_name__ )
def UpperCamelCase__ ( self , *__magic_name__ , **__magic_name__ ):
return self.generator.batch_decode(*__magic_name__ , **__magic_name__ )
def UpperCamelCase__ ( self , *__magic_name__ , **__magic_name__ ):
return self.generator.decode(*__magic_name__ , **__magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : Union[str, Any] = self.question_encoder
def UpperCamelCase__ ( self ):
lowerCamelCase : str = self.generator
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = "longest" , __magic_name__ = None , __magic_name__ = True , **__magic_name__ , ):
warnings.warn(
"""`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the """
"""regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` """
"""context manager to prepare your targets. See the documentation of your specific tokenizer for more """
"""details""" , __magic_name__ , )
if max_length is None:
lowerCamelCase : int = self.current_tokenizer.model_max_length
lowerCamelCase : int = self(
__magic_name__ , add_special_tokens=__magic_name__ , return_tensors=__magic_name__ , max_length=__magic_name__ , padding=__magic_name__ , truncation=__magic_name__ , **__magic_name__ , )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
lowerCamelCase : int = self.current_tokenizer.model_max_length
lowerCamelCase : Dict = self(
text_target=__magic_name__ , add_special_tokens=__magic_name__ , return_tensors=__magic_name__ , padding=__magic_name__ , max_length=__magic_name__ , truncation=__magic_name__ , **__magic_name__ , )
lowerCamelCase : List[Any] = labels["""input_ids"""]
return model_inputs
| 681 | 1 |
import pickle
import numpy as np
from matplotlib import pyplot as plt
class A__ :
def __init__( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=0.2 , __magic_name__=0.2 ):
lowerCamelCase : str = bp_numa
lowerCamelCase : List[Any] = bp_numa
lowerCamelCase : List[Any] = bp_numa
lowerCamelCase : Tuple = conva_get[:2]
lowerCamelCase : str = conva_get[2]
lowerCamelCase : Union[str, Any] = size_pa
lowerCamelCase : Union[str, Any] = rate_w
lowerCamelCase : str = rate_t
lowerCamelCase : int = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
lowerCamelCase : int = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
lowerCamelCase : str = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
lowerCamelCase : Union[str, Any] = -2 * np.random.rand(self.conva[1] ) + 1
lowerCamelCase : Dict = -2 * np.random.rand(self.num_bpa ) + 1
lowerCamelCase : Union[str, Any] = -2 * np.random.rand(self.num_bpa ) + 1
def UpperCamelCase__ ( self , __magic_name__ ):
# save model dict with pickle
lowerCamelCase : List[str] = {
"""num_bp1""": self.num_bpa,
"""num_bp2""": self.num_bpa,
"""num_bp3""": self.num_bpa,
"""conv1""": self.conva,
"""step_conv1""": self.step_conva,
"""size_pooling1""": self.size_poolinga,
"""rate_weight""": self.rate_weight,
"""rate_thre""": self.rate_thre,
"""w_conv1""": self.w_conva,
"""wkj""": self.wkj,
"""vji""": self.vji,
"""thre_conv1""": self.thre_conva,
"""thre_bp2""": self.thre_bpa,
"""thre_bp3""": self.thre_bpa,
}
with open(__magic_name__ , """wb""" ) as f:
pickle.dump(__magic_name__ , __magic_name__ )
print(F'''Model saved: {save_path}''' )
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ ):
# read saved model
with open(__magic_name__ , """rb""" ) as f:
lowerCamelCase : List[Any] = pickle.load(__magic_name__ ) # noqa: S301
lowerCamelCase : int = model_dic.get("""conv1""" )
conv_get.append(model_dic.get("""step_conv1""" ) )
lowerCamelCase : List[Any] = model_dic.get("""size_pooling1""" )
lowerCamelCase : List[str] = model_dic.get("""num_bp1""" )
lowerCamelCase : Optional[Any] = model_dic.get("""num_bp2""" )
lowerCamelCase : Optional[int] = model_dic.get("""num_bp3""" )
lowerCamelCase : Union[str, Any] = model_dic.get("""rate_weight""" )
lowerCamelCase : Union[str, Any] = model_dic.get("""rate_thre""" )
# create model instance
lowerCamelCase : List[str] = CNN(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
# modify model parameter
lowerCamelCase : Tuple = model_dic.get("""w_conv1""" )
lowerCamelCase : Optional[int] = model_dic.get("""wkj""" )
lowerCamelCase : int = model_dic.get("""vji""" )
lowerCamelCase : Dict = model_dic.get("""thre_conv1""" )
lowerCamelCase : Tuple = model_dic.get("""thre_bp2""" )
lowerCamelCase : List[str] = model_dic.get("""thre_bp3""" )
return conv_ins
def UpperCamelCase__ ( self , __magic_name__ ):
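# logistic activation shared by the convolution layer and both BP layers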
return 1 / (1 + np.exp(-1 * x ))
def UpperCamelCase__ ( self , __magic_name__ ):
return round(__magic_name__ , 3 )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
# convolution process
lowerCamelCase : Dict = convs[0]
lowerCamelCase : int = convs[1]
lowerCamelCase : str = np.shape(__magic_name__ )[0]
# get the data slice of original image data, data_focus
lowerCamelCase : int = []
for i_focus in range(0 , size_data - size_conv + 1 , __magic_name__ ):
for j_focus in range(0 , size_data - size_conv + 1 , __magic_name__ ):
lowerCamelCase : str = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(__magic_name__ )
# calculate the feature map of every single kernel, and save it as a list of matrices
lowerCamelCase : Any = []
lowerCamelCase : int = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(__magic_name__ ):
lowerCamelCase : int = []
for i_focus in range(len(__magic_name__ ) ):
lowerCamelCase : int = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(__magic_name__ ) )
lowerCamelCase : Dict = np.asmatrix(__magic_name__ ).reshape(
__magic_name__ , __magic_name__ )
data_featuremap.append(__magic_name__ )
# expand the data slice to one dimension
lowerCamelCase : Tuple = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(__magic_name__ ) )
lowerCamelCase : int = np.asarray(__magic_name__ )
return focus_list, data_featuremap
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__="average_pool" ):
# pooling process
lowerCamelCase : Optional[int] = len(featuremaps[0] )
lowerCamelCase : str = int(size_map / size_pooling )
lowerCamelCase : Optional[int] = []
for i_map in range(len(__magic_name__ ) ):
lowerCamelCase : Union[str, Any] = featuremaps[i_map]
lowerCamelCase : List[str] = []
for i_focus in range(0 , __magic_name__ , __magic_name__ ):
for j_focus in range(0 , __magic_name__ , __magic_name__ ):
lowerCamelCase : Tuple = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(__magic_name__ ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(__magic_name__ ) )
lowerCamelCase : Tuple = np.asmatrix(__magic_name__ ).reshape(__magic_name__ , __magic_name__ )
featuremap_pooled.append(__magic_name__ )
return featuremap_pooled
def UpperCamelCase__ ( self , __magic_name__ ):
# expand three-dimensional data into a one-dimensional list
lowerCamelCase : List[str] = []
for i in range(len(__magic_name__ ) ):
lowerCamelCase : List[str] = np.shape(data[i] )
lowerCamelCase : Any = data[i].reshape(1 , shapes[0] * shapes[1] )
lowerCamelCase : Any = data_listed.getA().tolist()[0]
data_expanded.extend(__magic_name__ )
lowerCamelCase : Optional[int] = np.asarray(__magic_name__ )
return data_expanded
def UpperCamelCase__ ( self , __magic_name__ ):
# expand a matrix into a one-dimensional list
lowerCamelCase : str = np.asarray(__magic_name__ )
lowerCamelCase : Any = np.shape(__magic_name__ )
lowerCamelCase : int = data_mat.reshape(1 , shapes[0] * shapes[1] )
return data_expanded
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
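# spread each pooled gradient back over its pooling window, then scale by the
# sigmoid derivative of the convolution output maps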
lowerCamelCase : str = []
lowerCamelCase : str = 0
for i_map in range(__magic_name__ ):
lowerCamelCase : List[str] = np.ones((size_map, size_map) )
for i in range(0 , __magic_name__ , __magic_name__ ):
for j in range(0 , __magic_name__ , __magic_name__ ):
lowerCamelCase : Optional[int] = pd_pool[
i_pool
]
lowerCamelCase : Union[str, Any] = i_pool + 1
lowerCamelCase : Tuple = np.multiply(
__magic_name__ , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) )
pd_all.append(__magic_name__ )
return pd_all
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=bool ):
# model training
print("""----------------------Start Training-------------------------""" )
print((""" - - Shape: Train_Data """, np.shape(__magic_name__ )) )
print((""" - - Shape: Teach_Data """, np.shape(__magic_name__ )) )
lowerCamelCase : Dict = 0
lowerCamelCase : int = []
lowerCamelCase : Union[str, Any] = 1_0_0_0_0
while rp < n_repeat and mse >= error_accuracy:
lowerCamelCase : Optional[int] = 0
print(F'''-------------Learning Time {rp}--------------''' )
for p in range(len(__magic_name__ ) ):
# print('------------Learning Image: %d--------------'%p)
lowerCamelCase : Optional[int] = np.asmatrix(datas_train[p] )
lowerCamelCase : List[Any] = np.asarray(datas_teach[p] )
lowerCamelCase , lowerCamelCase : Optional[Any] = self.convolute(
__magic_name__ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
lowerCamelCase : Tuple = self.pooling(__magic_name__ , self.size_poolinga )
lowerCamelCase : Any = np.shape(__magic_name__ )
lowerCamelCase : Optional[Any] = self._expand(__magic_name__ )
lowerCamelCase : List[Any] = data_bp_input
lowerCamelCase : Dict = np.dot(__magic_name__ , self.vji.T ) - self.thre_bpa
lowerCamelCase : List[Any] = self.sig(__magic_name__ )
lowerCamelCase : List[Any] = np.dot(__magic_name__ , self.wkj.T ) - self.thre_bpa
lowerCamelCase : Optional[int] = self.sig(__magic_name__ )
# --------------Model Leaning ------------------------
# calculate error and gradient---------------
lowerCamelCase : Optional[int] = np.multiply(
(data_teach - bp_outa) , np.multiply(__magic_name__ , (1 - bp_outa) ) )
lowerCamelCase : int = np.multiply(
np.dot(__magic_name__ , self.wkj ) , np.multiply(__magic_name__ , (1 - bp_outa) ) )
lowerCamelCase : Tuple = np.dot(__magic_name__ , self.vji )
lowerCamelCase : str = pd_i_all / (self.size_poolinga * self.size_poolinga)
lowerCamelCase : int = pd_conva_pooled.T.getA().tolist()
lowerCamelCase : Tuple = self._calculate_gradient_from_pool(
__magic_name__ , __magic_name__ , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
lowerCamelCase : List[Any] = self._expand_mat(pd_conva_all[k_conv] )
lowerCamelCase : str = self.rate_weight * np.dot(__magic_name__ , __magic_name__ )
lowerCamelCase : Optional[Any] = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
lowerCamelCase : int = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# all connected layer
lowerCamelCase : Optional[int] = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
lowerCamelCase : Any = self.vji + pd_j_all.T * bp_outa * self.rate_weight
lowerCamelCase : List[Any] = self.thre_bpa - pd_k_all * self.rate_thre
lowerCamelCase : Optional[Any] = self.thre_bpa - pd_j_all * self.rate_thre
# calculate the sum error of all single image
lowerCamelCase : List[str] = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
lowerCamelCase : str = rp + 1
lowerCamelCase : int = error_count / patterns
all_mse.append(__magic_name__ )
def draw_error():
lowerCamelCase : List[str] = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(__magic_name__ , """+-""" )
plt.plot(__magic_name__ , """r--""" )
plt.xlabel("""Learning Times""" )
plt.ylabel("""All_mse""" )
plt.grid(__magic_name__ , alpha=0.5 )
plt.show()
print("""------------------Training Complished---------------------""" )
print((""" - - Training epoch: """, rp, F''' - - Mse: {mse:.6f}''') )
if draw_e:
draw_error()
return mse
def UpperCamelCase__ ( self , __magic_name__ ):
# model predict
lowerCamelCase : int = []
print("""-------------------Start Testing-------------------------""" )
print((""" - - Shape: Test_Data """, np.shape(__magic_name__ )) )
for p in range(len(__magic_name__ ) ):
lowerCamelCase : Tuple = np.asmatrix(datas_test[p] )
lowerCamelCase , lowerCamelCase : Optional[int] = self.convolute(
__magic_name__ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
lowerCamelCase : Any = self.pooling(__magic_name__ , self.size_poolinga )
lowerCamelCase : Union[str, Any] = self._expand(__magic_name__ )
lowerCamelCase : Optional[Any] = data_bp_input
lowerCamelCase : List[str] = bp_outa * self.vji.T - self.thre_bpa
lowerCamelCase : Tuple = self.sig(__magic_name__ )
lowerCamelCase : Union[str, Any] = bp_outa * self.wkj.T - self.thre_bpa
lowerCamelCase : Union[str, Any] = self.sig(__magic_name__ )
produce_out.extend(bp_outa.getA().tolist() )
lowerCamelCase : List[Any] = [list(map(self.do_round , __magic_name__ ) ) for each in produce_out]
return np.asarray(__magic_name__ )
def UpperCamelCase__ ( self , __magic_name__ ):
# return the image data after the convolution and pooling steps so it can be inspected
lowerCamelCase : Optional[Any] = np.asmatrix(__magic_name__ )
lowerCamelCase , lowerCamelCase : Any = self.convolute(
__magic_name__ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
lowerCamelCase : str = self.pooling(__magic_name__ , self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
| 681 |
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def _a ( lowerCamelCase, lowerCamelCase ):
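# decode the given audio bytes with ffmpeg: downmix to mono ("-ac 1"), resample
# to the requested rate, and read raw 32-bit float PCM ("f32le") from stdout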
lowerCamelCase : List[Any] = F'''{sampling_rate}'''
lowerCamelCase : Optional[int] = """1"""
lowerCamelCase : Any = """f32le"""
lowerCamelCase : Any = [
"""ffmpeg""",
"""-i""",
"""pipe:0""",
"""-ac""",
ac,
"""-ar""",
ar,
"""-f""",
format_for_conversion,
"""-hide_banner""",
"""-loglevel""",
"""quiet""",
"""pipe:1""",
]
try:
with subprocess.Popen(lowerCamelCase, stdin=subprocess.PIPE, stdout=subprocess.PIPE ) as ffmpeg_process:
lowerCamelCase : Optional[int] = ffmpeg_process.communicate(lowerCamelCase )
except FileNotFoundError as error:
raise ValueError("""ffmpeg was not found but is required to load audio files from filename""" ) from error
lowerCamelCase : Union[str, Any] = output_stream[0]
lowerCamelCase : Optional[Any] = np.frombuffer(lowerCamelCase, np.floataa )
if audio.shape[0] == 0:
raise ValueError("""Malformed soundfile""" )
return audio
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase = "f32le", ):
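# capture raw microphone audio with ffmpeg, selecting the platform capture
# backend (alsa on Linux, avfoundation on macOS, dshow on Windows) and
# yielding fixed-size byte chunks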
lowerCamelCase : Dict = F'''{sampling_rate}'''
lowerCamelCase : List[Any] = """1"""
if format_for_conversion == "s16le":
lowerCamelCase : Any = 2
elif format_for_conversion == "f32le":
lowerCamelCase : Dict = 4
else:
raise ValueError(F'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
lowerCamelCase : Dict = platform.system()
if system == "Linux":
lowerCamelCase : Union[str, Any] = """alsa"""
lowerCamelCase : List[Any] = """default"""
elif system == "Darwin":
lowerCamelCase : List[Any] = """avfoundation"""
lowerCamelCase : List[Any] = """:0"""
elif system == "Windows":
lowerCamelCase : int = """dshow"""
lowerCamelCase : Any = """default"""
lowerCamelCase : Any = [
"""ffmpeg""",
"""-f""",
format_,
"""-i""",
input_,
"""-ac""",
ac,
"""-ar""",
ar,
"""-f""",
format_for_conversion,
"""-fflags""",
"""nobuffer""",
"""-hide_banner""",
"""-loglevel""",
"""quiet""",
"""pipe:1""",
]
lowerCamelCase : List[Any] = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
lowerCamelCase : Any = _ffmpeg_stream(lowerCamelCase, lowerCamelCase )
for item in iterator:
yield item
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = "f32le", ):
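# stream microphone audio as overlapping numpy chunks; each item carries the
# decoded "raw" array plus left/right "stride" sizes so consumers can trim the
# overlap, and chunks lagging too far behind real time are skipped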
if stream_chunk_s is not None:
lowerCamelCase : int = stream_chunk_s
else:
lowerCamelCase : Dict = chunk_length_s
lowerCamelCase : Optional[Any] = ffmpeg_microphone(lowerCamelCase, lowerCamelCase, format_for_conversion=lowerCamelCase )
if format_for_conversion == "s16le":
lowerCamelCase : Optional[int] = np.intaa
lowerCamelCase : Optional[Any] = 2
elif format_for_conversion == "f32le":
lowerCamelCase : int = np.floataa
lowerCamelCase : Any = 4
else:
raise ValueError(F'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
if stride_length_s is None:
lowerCamelCase : Any = chunk_length_s / 6
lowerCamelCase : Any = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(lowerCamelCase, (int, float) ):
lowerCamelCase : Optional[int] = [stride_length_s, stride_length_s]
lowerCamelCase : Any = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
lowerCamelCase : Optional[int] = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
lowerCamelCase : List[Any] = datetime.datetime.now()
lowerCamelCase : List[Any] = datetime.timedelta(seconds=lowerCamelCase )
for item in chunk_bytes_iter(lowerCamelCase, lowerCamelCase, stride=(stride_left, stride_right), stream=lowerCamelCase ):
# Put everything back in numpy scale
lowerCamelCase : Dict = np.frombuffer(item["""raw"""], dtype=lowerCamelCase )
lowerCamelCase : List[Any] = (
item["""stride"""][0] // size_of_sample,
item["""stride"""][1] // size_of_sample,
)
lowerCamelCase : Tuple = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 10 * delta:
# We're late !! SKIP
continue
yield item
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase = False ):
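# accumulate raw bytes and re-emit fixed-length chunks overlapping by
# (stride_left, stride_right) bytes; with stream=True, incomplete chunks are
# yielded early and flagged with "partial": True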
lowerCamelCase : Optional[int] = B""""""
lowerCamelCase , lowerCamelCase : str = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
F'''Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}''' )
lowerCamelCase : str = 0
for raw in iterator:
acc += raw
if stream and len(lowerCamelCase ) < chunk_len:
lowerCamelCase : Optional[int] = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(lowerCamelCase ) >= chunk_len:
# We are flushing the accumulator
lowerCamelCase : str = (_stride_left, stride_right)
lowerCamelCase : Dict = {"""raw""": acc[:chunk_len], """stride""": stride}
if stream:
lowerCamelCase : Optional[int] = False
yield item
lowerCamelCase : str = stride_left
lowerCamelCase : Tuple = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(lowerCamelCase ) > stride_left:
lowerCamelCase : List[str] = {"""raw""": acc, """stride""": (_stride_left, 0)}
if stream:
lowerCamelCase : List[Any] = False
yield item
def _a ( lowerCamelCase, lowerCamelCase ):
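# launch the given ffmpeg command and yield its stdout in buffer-sized reads until EOF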
lowerCamelCase : Optional[int] = 2**24 # 16 MB read buffer
try:
with subprocess.Popen(lowerCamelCase, stdout=subprocess.PIPE, bufsize=lowerCamelCase ) as ffmpeg_process:
while True:
lowerCamelCase : Any = ffmpeg_process.stdout.read(lowerCamelCase )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError("""ffmpeg was not found but is required to stream audio files from filename""" ) from error
| 681 | 1 |
import numpy as np
def _a ( lowerCamelCase ):
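# element-wise logistic function: squashes any real input into (0, 1), returning 0.5 at 0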
return 1 / (1 + np.exp(-vector ))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 681 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""")) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""")
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue_model_parallelism.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1600, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1600, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
])
class A__ ( unittest.TestCase):
def UpperCamelCase__ ( self ):
if self.framework == "pytorch":
subprocess.run(
F'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() , encoding="""utf-8""" , check=__magic_name__ , )
assert hasattr(self , """env""" )
def UpperCamelCase__ ( self , __magic_name__ ):
# configuration for running training on smdistributed Model Parallel
lowerCamelCase : Any = {
"""enabled""": True,
"""processes_per_host""": 8,
}
lowerCamelCase : Any = {
"""enabled""": True,
"""parameters""": {
"""microbatches""": 4,
"""placement_strategy""": """spread""",
"""pipeline""": """interleaved""",
"""optimize""": """speed""",
"""partitions""": 4,
"""ddp""": True,
},
}
lowerCamelCase : Optional[Any] = {"""smdistributed""": {"""modelparallel""": smp_options}, """mpi""": mpi_options}
lowerCamelCase : Dict = """trainer""" if self.script == """run_glue.py""" else """smtrainer"""
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F'''{self.env.base_job_name}-{instance_count}-smp-{name_extension}''' , instance_count=__magic_name__ , instance_type=self.instance_type , debugger_hook_config=__magic_name__ , hyperparameters={
**self.env.hyperparameters,
"""model_name_or_path""": self.model_name_or_path,
"""max_steps""": 5_0_0,
} , metric_definitions=self.env.metric_definitions , distribution=__magic_name__ , py_version="""py36""" , )
def UpperCamelCase__ ( self , __magic_name__ ):
TrainingJobAnalytics(__magic_name__ ).export_csv(F'''{self.env.test_path}/{job_name}_metrics.csv''' )
@parameterized.expand([(1,)] )
def UpperCamelCase__ ( self , __magic_name__ ):
# create estimator
lowerCamelCase : int = self.create_estimator(__magic_name__ )
# run training
estimator.fit()
# result dataframe
lowerCamelCase : Optional[Any] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
lowerCamelCase : Optional[Any] = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] )
lowerCamelCase : int = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
lowerCamelCase : int = (
Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 9_9_9_9_9_9 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy )
assert all(t <= self.results["""eval_loss"""] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F'''{estimator.latest_training_job.name}.json''' , """w""" ) as outfile:
json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , __magic_name__ )
| 681 | 1 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def _a ( ):
lowerCamelCase : List[Any] = ArgumentParser(
description=(
"""PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"""
) )
# Optional arguments for the launch helper
parser.add_argument("""--num_cores""", type=lowerCamelCase, default=1, help="""Number of TPU cores to use (1 or 8).""" )
# positional
parser.add_argument(
"""training_script""", type=lowerCamelCase, help=(
"""The full path to the single TPU training """
"""program/script to be launched in parallel, """
"""followed by all the arguments for the """
"""training script"""
), )
# rest from the training program
parser.add_argument("""training_script_args""", nargs=lowerCamelCase )
return parser.parse_args()
def _a ( ):
lowerCamelCase : Union[str, Any] = parse_args()
# Import training_script as a module.
lowerCamelCase : Optional[int] = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
lowerCamelCase : Union[str, Any] = script_fpath.stem
lowerCamelCase : int = importlib.import_module(lowerCamelCase )
# Patch sys.argv
lowerCamelCase : List[str] = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )]
xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores )
if __name__ == "__main__":
main()
| 681 |
from __future__ import annotations
def _a ( lowerCamelCase ):
lowerCamelCase : Union[str, Any] = str(lowerCamelCase )
return n == n[::-1]
def _a ( lowerCamelCase = 100_0000 ):
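# Project Euler problem 36: sum every number below the limit that is palindromic
# in both base 10 and base 2 (bin() prepends "0b", hence the split on "b")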
lowerCamelCase : Any = 0
for i in range(1, lowerCamelCase ):
if is_palindrome(lowerCamelCase ) and is_palindrome(bin(lowerCamelCase ).split("""b""" )[1] ):
total += i
return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
| 681 | 1 |
from __future__ import annotations
_lowerCamelCase =1_0
def _a ( lowerCamelCase ):
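# least-significant-digit radix sort: bucket the values by one decimal digit per
# pass (RADIX = 10) and re-concatenate, repeating until the largest value's
# digits are exhausted, e.g. [170, 45, 75, 90] should come back as [45, 75, 90, 170]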
lowerCamelCase : List[Any] = 1
lowerCamelCase : List[str] = max(lowerCamelCase )
while placement <= max_digit:
# declare and initialize empty buckets
lowerCamelCase : list[list] = [[] for _ in range(lowerCamelCase )]
# split list_of_ints between the buckets
for i in list_of_ints:
lowerCamelCase : Dict = int((i / placement) % RADIX )
buckets[tmp].append(lowerCamelCase )
# put each buckets' contents into list_of_ints
lowerCamelCase : Tuple = 0
for b in range(lowerCamelCase ):
for i in buckets[b]:
lowerCamelCase : Any = i
a += 1
# move to next
placement *= RADIX
return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
| 681 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def _a ( lowerCamelCase, lowerCamelCase=False ):
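# build (old_key, new_key) pairs that map the original MSN checkpoint names
# onto the transformers ViT naming scheme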
lowerCamelCase : Dict = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''module.blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''module.blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(F'''module.blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''module.blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''module.blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("""module.cls_token""", """vit.embeddings.cls_token"""),
("""module.patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""module.patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""module.pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""module.norm.weight""", """layernorm.weight"""),
("""module.norm.bias""", """layernorm.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
lowerCamelCase : Any = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase=False ):
for i in range(config.num_hidden_layers ):
if base_model:
lowerCamelCase : Optional[Any] = """"""
else:
lowerCamelCase : Optional[int] = """vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCamelCase : Dict = state_dict.pop(F'''module.blocks.{i}.attn.qkv.weight''' )
lowerCamelCase : List[str] = state_dict.pop(F'''module.blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase : Union[str, Any] = in_proj_weight[
: config.hidden_size, :
]
lowerCamelCase : Optional[int] = in_proj_bias[: config.hidden_size]
lowerCamelCase : Optional[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCamelCase : List[str] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCamelCase : Union[str, Any] = in_proj_weight[
-config.hidden_size :, :
]
lowerCamelCase : Any = in_proj_bias[-config.hidden_size :]
def _a ( lowerCamelCase ):
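# drop the classification head weights, which are not used when loading the base model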
lowerCamelCase : Tuple = ["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(lowerCamelCase, lowerCamelCase )
def _a ( lowerCamelCase ):
# the projection head is used in the self-supervised pre-training in MSN;
# for downstream tasks it is not needed.
lowerCamelCase : Any = [
"""module.fc.fc1.weight""",
"""module.fc.fc1.bias""",
"""module.fc.bn1.weight""",
"""module.fc.bn1.bias""",
"""module.fc.bn1.running_mean""",
"""module.fc.bn1.running_var""",
"""module.fc.bn1.num_batches_tracked""",
"""module.fc.fc2.weight""",
"""module.fc.fc2.bias""",
"""module.fc.bn2.weight""",
"""module.fc.bn2.bias""",
"""module.fc.bn2.running_mean""",
"""module.fc.bn2.running_var""",
"""module.fc.bn2.num_batches_tracked""",
"""module.fc.fc3.weight""",
"""module.fc.fc3.bias""",
]
for k in ignore_keys:
state_dict.pop(lowerCamelCase, lowerCamelCase )
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase ):
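# pop the value stored under the old key and re-insert it under the new key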
lowerCamelCase : Dict = dct.pop(lowerCamelCase )
lowerCamelCase : str = val
def _a ( lowerCamelCase, lowerCamelCase ):
lowerCamelCase : Any = ViTMSNConfig()
lowerCamelCase : Tuple = 1000
lowerCamelCase : List[Any] = """datasets/huggingface/label-files"""
lowerCamelCase : Optional[Any] = """imagenet-1k-id2label.json"""
lowerCamelCase : Optional[int] = json.load(open(hf_hub_download(lowerCamelCase, lowerCamelCase ), """r""" ) )
lowerCamelCase : List[Any] = {int(lowerCamelCase ): v for k, v in idalabel.items()}
lowerCamelCase : Optional[int] = idalabel
lowerCamelCase : Union[str, Any] = {v: k for k, v in idalabel.items()}
if "s16" in checkpoint_url:
lowerCamelCase : int = 384
lowerCamelCase : Optional[int] = 1536
lowerCamelCase : Tuple = 6
elif "l16" in checkpoint_url:
lowerCamelCase : Dict = 1024
lowerCamelCase : List[Any] = 4096
lowerCamelCase : Optional[int] = 24
lowerCamelCase : str = 16
lowerCamelCase : str = 0.1
elif "b4" in checkpoint_url:
lowerCamelCase : Union[str, Any] = 4
elif "l7" in checkpoint_url:
lowerCamelCase : Tuple = 7
lowerCamelCase : Optional[int] = 1024
lowerCamelCase : List[Any] = 4096
lowerCamelCase : Tuple = 24
lowerCamelCase : Dict = 16
lowerCamelCase : str = 0.1
lowerCamelCase : List[Any] = ViTMSNModel(lowerCamelCase )
lowerCamelCase : Dict = torch.hub.load_state_dict_from_url(lowerCamelCase, map_location="""cpu""" )["""target_encoder"""]
lowerCamelCase : Any = ViTImageProcessor(size=config.image_size )
remove_projection_head(lowerCamelCase )
lowerCamelCase : Dict = create_rename_keys(lowerCamelCase, base_model=lowerCamelCase )
for src, dest in rename_keys:
rename_key(lowerCamelCase, lowerCamelCase, lowerCamelCase )
read_in_q_k_v(lowerCamelCase, lowerCamelCase, base_model=lowerCamelCase )
model.load_state_dict(lowerCamelCase )
model.eval()
lowerCamelCase : Tuple = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowerCamelCase : Dict = Image.open(requests.get(lowerCamelCase, stream=lowerCamelCase ).raw )
lowerCamelCase : Union[str, Any] = ViTImageProcessor(
size=config.image_size, image_mean=lowerCamelCase, image_std=lowerCamelCase )
lowerCamelCase : Tuple = image_processor(images=lowerCamelCase, return_tensors="""pt""" )
# forward pass
torch.manual_seed(2 )
lowerCamelCase : int = model(**lowerCamelCase )
lowerCamelCase : Union[str, Any] = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
lowerCamelCase : Union[str, Any] = torch.tensor([[-1.0_9_1_5, -1.4_8_7_6, -1.1_8_0_9]] )
elif "b16" in checkpoint_url:
lowerCamelCase : Tuple = torch.tensor([[1_4.2_8_8_9, -1_8.9_0_4_5, 1_1.7_2_8_1]] )
elif "l16" in checkpoint_url:
lowerCamelCase : List[str] = torch.tensor([[4_1.5_0_2_8, -2_2.8_6_8_1, 4_5.6_4_7_5]] )
elif "b4" in checkpoint_url:
lowerCamelCase : Tuple = torch.tensor([[-4.3_8_6_8, 5.2_9_3_2, -0.4_1_3_7]] )
else:
lowerCamelCase : List[str] = torch.tensor([[-0.1_7_9_2, -0.6_4_6_5, 2.4_2_6_3]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3], lowerCamelCase, atol=1e-4 )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowerCamelCase )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(lowerCamelCase )
if __name__ == "__main__":
_lowerCamelCase =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar""",
type=str,
help="""URL of the checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
_lowerCamelCase =parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 681 | 1 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : int = (DEISMultistepScheduler,)
_UpperCAmelCase : List[Any] = (("""num_inference_steps""", 25),)
def UpperCamelCase__ ( self , **__magic_name__ ):
lowerCamelCase : Dict = {
"""num_train_timesteps""": 1_0_0_0,
"""beta_start""": 0.0_001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""solver_order""": 2,
}
config.update(**__magic_name__ )
return config
def UpperCamelCase__ ( self , __magic_name__=0 , **__magic_name__ ):
lowerCamelCase : List[str] = dict(self.forward_default_kwargs )
lowerCamelCase : Any = kwargs.pop("""num_inference_steps""" , __magic_name__ )
lowerCamelCase : Union[str, Any] = self.dummy_sample
lowerCamelCase : Dict = 0.1 * sample
lowerCamelCase : Optional[int] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
lowerCamelCase : Union[str, Any] = self.get_scheduler_config(**__magic_name__ )
lowerCamelCase : List[str] = scheduler_class(**__magic_name__ )
scheduler.set_timesteps(__magic_name__ )
# copy over dummy past residuals
lowerCamelCase : List[Any] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__magic_name__ )
lowerCamelCase : str = scheduler_class.from_pretrained(__magic_name__ )
new_scheduler.set_timesteps(__magic_name__ )
# copy over dummy past residuals
lowerCamelCase : List[Any] = dummy_past_residuals[: new_scheduler.config.solver_order]
lowerCamelCase , lowerCamelCase : Tuple = sample, sample
for t in range(__magic_name__ , time_step + scheduler.config.solver_order + 1 ):
lowerCamelCase : str = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample
lowerCamelCase : str = new_scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def UpperCamelCase__ ( self ):
pass
def UpperCamelCase__ ( self , __magic_name__=0 , **__magic_name__ ):
lowerCamelCase : Any = dict(self.forward_default_kwargs )
lowerCamelCase : List[Any] = kwargs.pop("""num_inference_steps""" , __magic_name__ )
lowerCamelCase : Optional[Any] = self.dummy_sample
lowerCamelCase : List[str] = 0.1 * sample
lowerCamelCase : str = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
lowerCamelCase : Tuple = self.get_scheduler_config()
lowerCamelCase : str = scheduler_class(**__magic_name__ )
scheduler.set_timesteps(__magic_name__ )
# copy over dummy past residuals (must be after setting timesteps)
lowerCamelCase : Union[str, Any] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__magic_name__ )
lowerCamelCase : List[str] = scheduler_class.from_pretrained(__magic_name__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(__magic_name__ )
# copy over dummy past residual (must be after setting timesteps)
lowerCamelCase : Dict = dummy_past_residuals[: new_scheduler.config.solver_order]
lowerCamelCase : Union[str, Any] = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample
lowerCamelCase : List[Any] = new_scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def UpperCamelCase__ ( self , __magic_name__=None , **__magic_name__ ):
if scheduler is None:
lowerCamelCase : Optional[int] = self.scheduler_classes[0]
lowerCamelCase : Tuple = self.get_scheduler_config(**__magic_name__ )
lowerCamelCase : int = scheduler_class(**__magic_name__ )
lowerCamelCase : Optional[Any] = self.scheduler_classes[0]
lowerCamelCase : Optional[int] = self.get_scheduler_config(**__magic_name__ )
lowerCamelCase : Optional[Any] = scheduler_class(**__magic_name__ )
lowerCamelCase : Dict = 1_0
lowerCamelCase : Optional[Any] = self.dummy_model()
lowerCamelCase : Any = self.dummy_sample_deter
scheduler.set_timesteps(__magic_name__ )
for i, t in enumerate(scheduler.timesteps ):
lowerCamelCase : List[str] = model(__magic_name__ , __magic_name__ )
lowerCamelCase : Union[str, Any] = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ ).prev_sample
return sample
def UpperCamelCase__ ( self ):
lowerCamelCase : Dict = dict(self.forward_default_kwargs )
lowerCamelCase : Tuple = kwargs.pop("""num_inference_steps""" , __magic_name__ )
for scheduler_class in self.scheduler_classes:
lowerCamelCase : int = self.get_scheduler_config()
lowerCamelCase : int = scheduler_class(**__magic_name__ )
lowerCamelCase : Dict = self.dummy_sample
lowerCamelCase : Optional[int] = 0.1 * sample
if num_inference_steps is not None and hasattr(__magic_name__ , """set_timesteps""" ):
scheduler.set_timesteps(__magic_name__ )
elif num_inference_steps is not None and not hasattr(__magic_name__ , """set_timesteps""" ):
lowerCamelCase : Optional[Any] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
lowerCamelCase : Tuple = [residual + 0.2, residual + 0.15, residual + 0.10]
lowerCamelCase : List[Any] = dummy_past_residuals[: scheduler.config.solver_order]
lowerCamelCase : Dict = scheduler.timesteps[5]
lowerCamelCase : List[Any] = scheduler.timesteps[6]
lowerCamelCase : Any = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample
lowerCamelCase : Optional[int] = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def UpperCamelCase__ ( self ):
# make sure that iterating over schedulers with same config names gives same results
# for defaults
lowerCamelCase : Union[str, Any] = DEISMultistepScheduler(**self.get_scheduler_config() )
lowerCamelCase : int = self.full_loop(scheduler=__magic_name__ )
lowerCamelCase : Dict = torch.mean(torch.abs(__magic_name__ ) )
assert abs(result_mean.item() - 0.23_916 ) < 1e-3
lowerCamelCase : Optional[Any] = DPMSolverSinglestepScheduler.from_config(scheduler.config )
lowerCamelCase : Any = DPMSolverMultistepScheduler.from_config(scheduler.config )
lowerCamelCase : Union[str, Any] = UniPCMultistepScheduler.from_config(scheduler.config )
lowerCamelCase : str = DEISMultistepScheduler.from_config(scheduler.config )
lowerCamelCase : Union[str, Any] = self.full_loop(scheduler=__magic_name__ )
lowerCamelCase : str = torch.mean(torch.abs(__magic_name__ ) )
assert abs(result_mean.item() - 0.23_916 ) < 1e-3
def UpperCamelCase__ ( self ):
for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=__magic_name__ )
def UpperCamelCase__ ( self ):
self.check_over_configs(thresholding=__magic_name__ )
for order in [1, 2, 3]:
for solver_type in ["logrho"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=__magic_name__ , prediction_type=__magic_name__ , sample_max_value=__magic_name__ , algorithm_type="""deis""" , solver_order=__magic_name__ , solver_type=__magic_name__ , )
def UpperCamelCase__ ( self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__magic_name__ )
def UpperCamelCase__ ( self ):
for algorithm_type in ["deis"]:
for solver_type in ["logrho"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=__magic_name__ , solver_type=__magic_name__ , prediction_type=__magic_name__ , algorithm_type=__magic_name__ , )
lowerCamelCase : Optional[int] = self.full_loop(
solver_order=__magic_name__ , solver_type=__magic_name__ , prediction_type=__magic_name__ , algorithm_type=__magic_name__ , )
assert not torch.isnan(__magic_name__ ).any(), "Samples have nan numbers"
def UpperCamelCase__ ( self ):
self.check_over_configs(lower_order_final=__magic_name__ )
self.check_over_configs(lower_order_final=__magic_name__ )
def UpperCamelCase__ ( self ):
for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_forward(num_inference_steps=__magic_name__ , time_step=0 )
def UpperCamelCase__ ( self ):
lowerCamelCase : List[Any] = self.full_loop()
lowerCamelCase : List[str] = torch.mean(torch.abs(__magic_name__ ) )
assert abs(result_mean.item() - 0.23_916 ) < 1e-3
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[Any] = self.full_loop(prediction_type="""v_prediction""" )
lowerCamelCase : Dict = torch.mean(torch.abs(__magic_name__ ) )
assert abs(result_mean.item() - 0.091 ) < 1e-3
    def UpperCamelCase__ ( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True , dynamic_thresholding_ratio=0 )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 1_0
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps )
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample
        assert sample.dtype == torch.float16
| 681 |
def _a ( num ):
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
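    # Quick illustration of the digit-reversal check above (a sketch, not part
    # of the original module): 121 reads the same backwards, 123 does not, and
    # negative numbers are rejected up front.
    assert _a(121) is True
    assert _a(123) is False
    assert _a(-121) is False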
| 681 | 1 |
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
logger = logging.getLogger(__name__)
def accuracy ( out, labels ):
    outputs = np.argmax(out, axis=1 )
    return np.sum(outputs == labels )
def load_rocstories_dataset ( dataset_path ):
    with open(dataset_path, encoding="""utf_8""" ) as f:
        f = csv.reader(f )
        output = []
        next(f ) # skip the first line
        for line in tqdm(f ):
            output.append((""" """.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) )
    return output
def pre_process_datasets ( encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token ):
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset )
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64 )
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64 )
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64 )
        mc_labels = np.zeros((n_batch,), dtype=np.int64 )
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset ):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1 )] = with_cont1
            input_ids[i, 1, : len(with_cont2 )] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1 ) - 1
            mc_token_ids[i, 1] = len(with_cont2 ) - 1
            lm_labels[i, 0, : len(with_cont1 )] = with_cont1
            lm_labels[i, 1, : len(with_cont2 )] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t ) for t in all_inputs ) )
    return tensor_datasets
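# Quick shape check for pre_process_datasets (a sketch with made-up token ids;
# not part of the original script): two examples, input_len=8, cap_length=4.
if __debug__:
    _toy = [[([1, 2], [3], [4], 0), ([5], [6], [7], 1)]]
    _t = pre_process_datasets(_toy, 8, 4, 90, 91, 92)
    assert _t[0][0].shape == (2, 2, 8)  # input_ids: (n_batch, 2, input_len)
    assert _t[0][1].shape == (2, 2)  # mc_token_ids: position of the clf token
    assert _t[0][3].shape == (2,)  # mc_labels: index of the true continuation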
def main ( ):
    parser = argparse.ArgumentParser()
    parser.add_argument("""--model_name""", type=str, default="""openai-gpt""", help="""pretrained model name""" )
    parser.add_argument("""--do_train""", action="""store_true""", help="""Whether to run training.""" )
    parser.add_argument("""--do_eval""", action="""store_true""", help="""Whether to run eval on the dev set.""" )
    parser.add_argument(
        """--output_dir""", default=None, type=str, required=True, help="""The output directory where the model predictions and checkpoints will be written.""", )
    parser.add_argument("""--train_dataset""", type=str, default="""""" )
    parser.add_argument("""--eval_dataset""", type=str, default="""""" )
    parser.add_argument("""--seed""", type=int, default=42 )
    parser.add_argument("""--num_train_epochs""", type=int, default=3 )
    parser.add_argument("""--train_batch_size""", type=int, default=8 )
    parser.add_argument("""--eval_batch_size""", type=int, default=16 )
    parser.add_argument("""--adam_epsilon""", default=1e-8, type=float, help="""Epsilon for Adam optimizer.""" )
    parser.add_argument("""--max_grad_norm""", type=int, default=1 )
    parser.add_argument(
        """--max_steps""", default=-1, type=int, help=(
            """If > 0: set total number of training steps to perform. Override num_train_epochs."""
        ), )
    parser.add_argument(
        """--gradient_accumulation_steps""", type=int, default=1, help="""Number of updates steps to accumulate before performing a backward/update pass.""", )
    parser.add_argument("""--learning_rate""", type=float, default=6.25e-5 )
    parser.add_argument("""--warmup_steps""", default=0, type=int, help="""Linear warmup over warmup_steps.""" )
    parser.add_argument("""--lr_schedule""", type=str, default="""warmup_linear""" )
    parser.add_argument("""--weight_decay""", type=float, default=0.0_1 )
    parser.add_argument("""--lm_coef""", type=float, default=0.9 )
    parser.add_argument("""--n_valid""", type=int, default=374 )
    parser.add_argument("""--server_ip""", type=str, default="""""", help="""Can be used for distant debugging.""" )
    parser.add_argument("""--server_port""", type=str, default="""""", help="""Can be used for distant debugging.""" )
    args = parser.parse_args()
    print(args )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("""Waiting for debugger attach""" )
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
    device = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
    n_gpu = torch.cuda.device_count()
    logger.info("""device: {}, n_gpu {}""".format(device, n_gpu ) )
if not args.do_train and not args.do_eval:
raise ValueError("""At least one of `do_train` or `do_eval` must be True.""" )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
lowerCamelCase : Tuple = ["""_start_""", """_delimiter_""", """_classify_"""]
lowerCamelCase : List[str] = OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(lowerCamelCase )
lowerCamelCase : int = tokenizer.convert_tokens_to_ids(lowerCamelCase )
lowerCamelCase : List[Any] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(lowerCamelCase ) )
model.to(lowerCamelCase )
# Load and encode the datasets
    def tokenize_and_encode(obj ):
        if isinstance(obj, str ):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj ) )
        elif isinstance(obj, int ):
            return obj
        return [tokenize_and_encode(o ) for o in obj]
logger.info("""Encoding dataset...""" )
    train_dataset = load_rocstories_dataset(args.train_dataset )
    eval_dataset = load_rocstories_dataset(args.eval_dataset )
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets )
# Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length] ) + max(len(cont1[:max_length] ), len(cont2[:max_length] ) ) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset )
    input_length = min(input_length, model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids )
    train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]
    train_data = TensorDataset(*train_tensor_dataset )
    train_sampler = RandomSampler(train_data )
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size )
    eval_data = TensorDataset(*eval_tensor_dataset )
    eval_sampler = SequentialSampler(eval_data )
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader ) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader ) // args.gradient_accumulation_steps * args.num_train_epochs
        param_optimizer = list(model.named_parameters() )
        no_decay = ["""bias""", """LayerNorm.bias""", """LayerNorm.weight"""]
        optimizer_grouped_parameters = [
            {
                """params""": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
                """weight_decay""": args.weight_decay,
            },
            {"""params""": [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], """weight_decay""": 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon )
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total )
if args.do_train:
        nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs ), desc="""Epoch""" ):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader, desc="""Training""" )
            for step, batch in enumerate(tqdm_bar ):
                batch = tuple(t.to(device ) for t in batch )
                input_ids, mc_token_ids, lm_labels, mc_labels = batch
                losses = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels )
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = """Training loss: {:.2e} lr: {:.2e}""".format(exp_average_loss, scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model, """module""" ) else model # Only save the model itself
        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME )
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME )
        torch.save(model_to_save.state_dict(), output_model_file )
        model_to_save.config.to_json_file(output_config_file )
        tokenizer.save_vocabulary(args.output_dir )
        # Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
        model.to(device )
if args.do_eval:
model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader, desc="""Evaluating""" ):
            batch = tuple(t.to(device ) for t in batch )
            input_ids, mc_token_ids, lm_labels, mc_labels = batch
            with torch.no_grad():
                _, mc_loss, _, mc_logits = model(
                    input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels )
            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to("""cpu""" ).numpy()
            tmp_eval_accuracy = accuracy(mc_logits, mc_labels )
            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy
            nb_eval_examples += input_ids.size(0 )
            nb_eval_steps += 1
        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {"""eval_loss""": eval_loss, """eval_accuracy""": eval_accuracy, """train_loss""": train_loss}
        output_eval_file = os.path.join(args.output_dir, """eval_results.txt""" )
        with open(output_eval_file, """w""" ) as writer:
            logger.info("""***** Eval results *****""" )
            for key in sorted(result.keys() ):
                logger.info(""" %s = %s""", key, str(result[key] ) )
                writer.write("""%s = %s\n""" % (key, str(result[key] )) )
if __name__ == "__main__":
main()
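    # Example invocation (paths and the file name run_openai_gpt.py are
    # illustrative assumptions, not asserted by this script):
    # python run_openai_gpt.py --model_name openai-gpt --do_train --do_eval \
    #   --train_dataset "$ROCSTORIES/cloze_test_val__spring2016.csv" \
    #   --eval_dataset "$ROCSTORIES/cloze_test_test__spring2016.csv" \
    #   --output_dir ../log --train_batch_size 16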
| 681 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {
"""configuration_gpt_neox_japanese""": ["""GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoXJapaneseConfig"""],
"""tokenization_gpt_neox_japanese""": ["""GPTNeoXJapaneseTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_gpt_neox_japanese"""] = [
"""GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoXJapaneseForCausalLM""",
"""GPTNeoXJapaneseLayer""",
"""GPTNeoXJapaneseModel""",
"""GPTNeoXJapanesePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
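# Minimal sketch of the lazy-import idea used above (a toy stand-in, not the
# real transformers._LazyModule): the package import stays cheap, and a heavy
# submodule is only imported on first attribute access.
import importlib
import types


class _LazyDemo(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        # Resolve the attribute by importing its backing module on demand.
        for module_name, names in self._import_structure.items():
            if attr in names:
                return getattr(importlib.import_module(module_name), attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")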
| 681 | 1 |
def miller_rabin ( n, allow_probable = False ):
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3_3170_4406_4679_8873_8596_1981 and not allow_probable:
raise ValueError(
"""Warning: upper bound of deterministic test is exceeded. """
"""Pass allow_probable=True to allow probabilistic test. """
"""A return value of True indicates a probable prime.""" )
# array bounds provided by analysis
    bounds = [
2047,
137_3653,
2532_6001,
32_1503_1751,
2_1523_0289_8747,
3_4747_4966_0383,
341_5500_7172_8321,
1,
382_5123_0565_4641_3051,
1,
1,
3186_6585_7834_0311_5116_7461,
3_3170_4406_4679_8873_8596_1981,
]
    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
    for idx, _p in enumerate(bounds, 1 ):
        if n < _p:
            # then we have our last prime to check
            plist = primes[:idx]
            break
    d, s = n - 1, 0
# break up n -1 into a power of 2 (s) and
# remaining odd component
# essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
for prime in plist:
        pr = False
        for r in range(s ):
            m = pow(prime, d * 2**r, n )
            # see article for analysis explanation for m
            if (r == 0 and m == 1) or ((m + 1) % n == 0):
                pr = True
# this loop will not determine compositeness
break
if pr:
continue
# if pr is False, then the above loop never evaluated to true,
# and the n MUST be composite
return False
return True
def test_miller_rabin ( ):
assert not miller_rabin(561 )
assert miller_rabin(563 )
# 2047
assert not miller_rabin(83_8201 )
assert miller_rabin(83_8207 )
# 1_373_653
assert not miller_rabin(1731_6001 )
assert miller_rabin(1731_6017 )
# 25_326_001
assert not miller_rabin(30_7838_6641 )
assert miller_rabin(30_7838_6653 )
# 3_215_031_751
assert not miller_rabin(1_7130_4557_4801 )
assert miller_rabin(1_7130_4557_4819 )
# 2_152_302_898_747
assert not miller_rabin(2_7797_9972_8307 )
assert miller_rabin(2_7797_9972_8327 )
# 3_474_749_660_383
assert not miller_rabin(113_8500_2390_9441 )
assert miller_rabin(113_8500_2390_9527 )
# 341_550_071_728_321
assert not miller_rabin(127_5041_0188_4880_4351 )
assert miller_rabin(127_5041_0188_4880_4391 )
# 3_825_123_056_546_413_051
assert not miller_rabin(796_6646_4458_5077_8779_1867 )
assert miller_rabin(796_6646_4458_5077_8779_1951 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(5528_4067_7446_6478_9766_0333 )
assert miller_rabin(5528_4067_7446_6478_9766_0359 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
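    # Illustrative calls (a sketch; all values sit far below the deterministic
    # bound, so no probabilistic fallback is needed):
    assert miller_rabin(97)  # prime
    assert not miller_rabin(91)  # 7 * 13
    assert miller_rabin(2) and not miller_rabin(1)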
| 681 |
import copy
import random
from transformers import CLIPTokenizer
class MultiTokenCLIPTokenizer ( CLIPTokenizer):
    def __init__( self , *args , **kwargs ):
        super().__init__(*args , **kwargs )
        self.token_map = {}
    def try_adding_tokens( self , placeholder_token , *args , **kwargs ):
        num_added_tokens = super().add_tokens(placeholder_token , *args , **kwargs )
        if num_added_tokens == 0:
            raise ValueError(
                F'''The tokenizer already contains the token {placeholder_token}. Please pass a different'''
                """ `placeholder_token` that is not already in the tokenizer.""" )
    def add_placeholder_tokens( self , placeholder_token , *args , num_vec_per_token=1 , **kwargs ):
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token , *args , **kwargs )
            output.append(placeholder_token )
        else:
            output = []
            for i in range(num_vec_per_token ):
                ith_token = placeholder_token + F'''_{i}'''
                self.try_adding_tokens(ith_token , *args , **kwargs )
                output.append(ith_token )
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    F'''The tokenizer already has placeholder token {token} that can get confused with'''
                    F''' {placeholder_token}; keep placeholder tokens independent''' )
        self.token_map[placeholder_token] = output
    def replace_placeholder_tokens_in_text( self , text , vector_shuffle=False , prop_tokens_to_load=1.0 ):
        if isinstance(text , list ):
            output = []
            for i in range(len(text ) ):
                output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=vector_shuffle ) )
            return output
        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens ) * prop_tokens_to_load )]
                if vector_shuffle:
                    tokens = copy.copy(tokens )
                    random.shuffle(tokens )
                text = text.replace(placeholder_token , """ """.join(tokens ) )
        return text
    def __call__( self , text , *args , vector_shuffle=False , prop_tokens_to_load=1.0 , **kwargs ):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text , vector_shuffle=vector_shuffle , prop_tokens_to_load=prop_tokens_to_load ) , *args , **kwargs , )
    def encode( self , text , *args , vector_shuffle=False , prop_tokens_to_load=1.0 , **kwargs ):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text , vector_shuffle=vector_shuffle , prop_tokens_to_load=prop_tokens_to_load ) , *args , **kwargs , )
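# Hypothetical usage sketch (the checkpoint name and the placeholder string are
# assumptions drawn from the textual-inversion use case, not from this file):
#   tokenizer = MultiTokenCLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
#   tokenizer.add_placeholder_tokens("<cat-toy>", num_vec_per_token=4)
#   ids = tokenizer("a photo of <cat-toy>", vector_shuffle=True)
# "<cat-toy>" is expanded to "<cat-toy>_0 ... <cat-toy>_3" before encoding.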
| 681 | 1 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class DetrImageProcessingTester( unittest.TestCase):
    def __init__( self , parent , batch_size=7 , num_channels=3 , min_resolution=3_0 , max_resolution=4_0_0 , do_resize=True , size=None , do_rescale=True , rescale_factor=1 / 2_5_5 , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , do_pad=True , ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"""shortest_edge""": 1_8, """longest_edge""": 1_3_3_3}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
def UpperCamelCase__ ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
    def get_expected_values( self , image_inputs , batched=False ):
        if not batched:
            image = image_inputs[0]
            if isinstance(image , Image.Image ):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["""shortest_edge"""] * h / w )
                expected_width = self.size["""shortest_edge"""]
            elif w > h:
                expected_height = self.size["""shortest_edge"""]
                expected_width = int(self.size["""shortest_edge"""] * w / h )
            else:
                expected_height = self.size["""shortest_edge"""]
                expected_width = self.size["""shortest_edge"""]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values , key=lambda item : item[0] )[0]
            expected_width = max(expected_values , key=lambda item : item[1] )[1]
        return expected_height, expected_width
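    # Worked example of the shortest-edge rule above (illustrative numbers): a
    # PIL image with w=640, h=480 and size["shortest_edge"]=18 yields
    # expected_height=18 and expected_width=int(18 * 640 / 480)=24.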
@require_torch
@require_vision
class DetrImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase):
    image_processing_class = DetrImageProcessor if is_vision_available() else None
def UpperCamelCase__ ( self ):
        self.image_processor_tester = DetrImageProcessingTester(self )
@property
def UpperCamelCase__ ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase__ ( self ):
lowerCamelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__magic_name__ , """image_mean""" ) )
self.assertTrue(hasattr(__magic_name__ , """image_std""" ) )
self.assertTrue(hasattr(__magic_name__ , """do_normalize""" ) )
self.assertTrue(hasattr(__magic_name__ , """do_rescale""" ) )
self.assertTrue(hasattr(__magic_name__ , """rescale_factor""" ) )
self.assertTrue(hasattr(__magic_name__ , """do_resize""" ) )
self.assertTrue(hasattr(__magic_name__ , """size""" ) )
self.assertTrue(hasattr(__magic_name__ , """do_pad""" ) )
def UpperCamelCase__ ( self ):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"""shortest_edge""": 1_8, """longest_edge""": 1_3_3_3} )
        self.assertEqual(image_processor.do_pad , True )
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict , size=4_2 , max_size=8_4 , pad_and_return_pixel_mask=False )
        self.assertEqual(image_processor.size , {"""shortest_edge""": 4_2, """longest_edge""": 8_4} )
        self.assertEqual(image_processor.do_pad , False )
def UpperCamelCase__ ( self ):
pass
def UpperCamelCase__ ( self ):
# Initialize image_processing
lowerCamelCase : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , Image.Image )
# Test not batched input
lowerCamelCase : Optional[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
lowerCamelCase , lowerCamelCase : Union[str, Any] = self.image_processor_tester.get_expected_values(__magic_name__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase , lowerCamelCase : List[Any] = self.image_processor_tester.get_expected_values(__magic_name__ , batched=__magic_name__ )
lowerCamelCase : str = image_processing(__magic_name__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase__ ( self ):
# Initialize image_processing
lowerCamelCase : int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCamelCase : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ , numpify=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , np.ndarray )
# Test not batched input
lowerCamelCase : Any = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
lowerCamelCase , lowerCamelCase : List[Any] = self.image_processor_tester.get_expected_values(__magic_name__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase : Optional[int] = image_processing(__magic_name__ , return_tensors="""pt""" ).pixel_values
lowerCamelCase , lowerCamelCase : Optional[int] = self.image_processor_tester.get_expected_values(__magic_name__ , batched=__magic_name__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase__ ( self ):
# Initialize image_processing
lowerCamelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCamelCase : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ , torchify=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , torch.Tensor )
# Test not batched input
lowerCamelCase : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
lowerCamelCase , lowerCamelCase : str = self.image_processor_tester.get_expected_values(__magic_name__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase : int = image_processing(__magic_name__ , return_tensors="""pt""" ).pixel_values
lowerCamelCase , lowerCamelCase : Tuple = self.image_processor_tester.get_expected_values(__magic_name__ , batched=__magic_name__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def UpperCamelCase__ ( self ):
# prepare image and target
lowerCamelCase : Optional[int] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
lowerCamelCase : str = json.loads(f.read() )
lowerCamelCase : str = {"""image_id""": 3_9_7_6_9, """annotations""": target}
# encode them
lowerCamelCase : Union[str, Any] = DetrImageProcessor.from_pretrained("""facebook/detr-resnet-50""" )
lowerCamelCase : Optional[int] = image_processing(images=__magic_name__ , annotations=__magic_name__ , return_tensors="""pt""" )
# verify pixel values
lowerCamelCase : Tuple = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding["""pixel_values"""].shape , __magic_name__ )
lowerCamelCase : Any = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __magic_name__ , atol=1e-4 ) )
# verify area
lowerCamelCase : List[str] = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __magic_name__ ) )
# verify boxes
lowerCamelCase : Optional[Any] = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __magic_name__ )
lowerCamelCase : Optional[int] = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __magic_name__ , atol=1e-3 ) )
# verify image_id
lowerCamelCase : Tuple = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __magic_name__ ) )
# verify is_crowd
lowerCamelCase : int = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __magic_name__ ) )
# verify class_labels
lowerCamelCase : Optional[Any] = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __magic_name__ ) )
# verify orig_size
lowerCamelCase : Union[str, Any] = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __magic_name__ ) )
# verify size
lowerCamelCase : Any = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __magic_name__ ) )
@slow
def UpperCamelCase__ ( self ):
# prepare image, target and masks_path
lowerCamelCase : Dict = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
lowerCamelCase : Dict = json.loads(f.read() )
lowerCamelCase : Tuple = {"""file_name""": """000000039769.png""", """image_id""": 3_9_7_6_9, """segments_info""": target}
lowerCamelCase : List[Any] = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
lowerCamelCase : List[str] = DetrImageProcessor.from_pretrained("""facebook/detr-resnet-50-panoptic""" )
lowerCamelCase : Tuple = image_processing(images=__magic_name__ , annotations=__magic_name__ , masks_path=__magic_name__ , return_tensors="""pt""" )
# verify pixel values
lowerCamelCase : Any = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding["""pixel_values"""].shape , __magic_name__ )
lowerCamelCase : str = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __magic_name__ , atol=1e-4 ) )
# verify area
lowerCamelCase : Tuple = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __magic_name__ ) )
# verify boxes
lowerCamelCase : Union[str, Any] = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __magic_name__ )
lowerCamelCase : int = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __magic_name__ , atol=1e-3 ) )
# verify image_id
lowerCamelCase : Tuple = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __magic_name__ ) )
# verify is_crowd
lowerCamelCase : str = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __magic_name__ ) )
# verify class_labels
lowerCamelCase : List[str] = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __magic_name__ ) )
# verify masks
lowerCamelCase : Optional[Any] = 8_2_2_8_7_3
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , __magic_name__ )
# verify orig_size
lowerCamelCase : Optional[int] = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __magic_name__ ) )
# verify size
lowerCamelCase : List[Any] = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __magic_name__ ) )
| 681 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class ChineseCLIPImageProcessingTester( unittest.TestCase):
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=1_8 , min_resolution=3_0 , max_resolution=4_0_0 , do_resize=True , size=None , do_center_crop=True , crop_size=None , do_normalize=True , image_mean=[0.48_145_466, 0.4_578_275, 0.40_821_073] , image_std=[0.26_862_954, 0.26_130_258, 0.27_577_711] , do_convert_rgb=True , ):
        size = size if size is not None else {"""height""": 2_2_4, """width""": 2_2_4}
        crop_size = crop_size if crop_size is not None else {"""height""": 1_8, """width""": 1_8}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb
def UpperCamelCase__ ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
    def prepare_inputs( self , equal_resolution=False , numpify=False , torchify=False ):
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size ):
                image_inputs.append(
                    np.random.randint(
                        2_5_5 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uint8 ) )
        else:
            image_inputs = []
            for i in range(self.batch_size ):
                width, height = np.random.choice(np.arange(self.min_resolution , self.max_resolution ) , 2 )
                image_inputs.append(np.random.randint(2_5_5 , size=(self.num_channels, width, height) , dtype=np.uint8 ) )
        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        if torchify:
            image_inputs = [torch.from_numpy(x ) for x in image_inputs]
        return image_inputs
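# Why np.moveaxis appears above (a small sketch): the random arrays are
# channels-first (C, H, W), while PIL's Image.fromarray expects channels-last
# (H, W, C).
_chw_example = np.zeros((3, 4, 5), dtype=np.uint8)
assert np.moveaxis(_chw_example, 0, -1).shape == (4, 5, 3)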
@require_torch
@require_vision
class ChineseCLIPImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None
def UpperCamelCase__ ( self ):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self , do_center_crop=True )
@property
def UpperCamelCase__ ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase__ ( self ):
lowerCamelCase : Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__magic_name__ , """do_resize""" ) )
self.assertTrue(hasattr(__magic_name__ , """size""" ) )
self.assertTrue(hasattr(__magic_name__ , """do_center_crop""" ) )
self.assertTrue(hasattr(__magic_name__ , """center_crop""" ) )
self.assertTrue(hasattr(__magic_name__ , """do_normalize""" ) )
self.assertTrue(hasattr(__magic_name__ , """image_mean""" ) )
self.assertTrue(hasattr(__magic_name__ , """image_std""" ) )
self.assertTrue(hasattr(__magic_name__ , """do_convert_rgb""" ) )
def UpperCamelCase__ ( self ):
lowerCamelCase : Tuple = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 2_2_4, """width""": 2_2_4} )
self.assertEqual(image_processor.crop_size , {"""height""": 1_8, """width""": 1_8} )
lowerCamelCase : List[str] = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 4_2} )
self.assertEqual(image_processor.crop_size , {"""height""": 8_4, """width""": 8_4} )
def UpperCamelCase__ ( self ):
pass
def UpperCamelCase__ ( self ):
# Initialize image_processing
lowerCamelCase : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase : Dict = self.image_processor_tester.prepare_inputs(equal_resolution=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , Image.Image )
# Test not batched input
lowerCamelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
lowerCamelCase : Optional[Any] = image_processing(__magic_name__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def UpperCamelCase__ ( self ):
# Initialize image_processing
lowerCamelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCamelCase : Dict = self.image_processor_tester.prepare_inputs(equal_resolution=__magic_name__ , numpify=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , np.ndarray )
# Test not batched input
lowerCamelCase : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
lowerCamelCase : Tuple = image_processing(__magic_name__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def UpperCamelCase__ ( self ):
# Initialize image_processing
lowerCamelCase : str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCamelCase : Any = self.image_processor_tester.prepare_inputs(equal_resolution=__magic_name__ , torchify=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , torch.Tensor )
# Test not batched input
lowerCamelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
lowerCamelCase : str = image_processing(__magic_name__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels( ImageProcessingSavingTestMixin , unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None
def UpperCamelCase__ ( self ):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=True )
        self.expected_encoded_image_num_channels = 3
@property
def UpperCamelCase__ ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase__ ( self ):
lowerCamelCase : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__magic_name__ , """do_resize""" ) )
self.assertTrue(hasattr(__magic_name__ , """size""" ) )
self.assertTrue(hasattr(__magic_name__ , """do_center_crop""" ) )
self.assertTrue(hasattr(__magic_name__ , """center_crop""" ) )
self.assertTrue(hasattr(__magic_name__ , """do_normalize""" ) )
self.assertTrue(hasattr(__magic_name__ , """image_mean""" ) )
self.assertTrue(hasattr(__magic_name__ , """image_std""" ) )
self.assertTrue(hasattr(__magic_name__ , """do_convert_rgb""" ) )
def UpperCamelCase__ ( self ):
pass
def UpperCamelCase__ ( self ):
# Initialize image_processing
lowerCamelCase : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase : Dict = self.image_processor_tester.prepare_inputs(equal_resolution=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , Image.Image )
# Test not batched input
lowerCamelCase : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
lowerCamelCase : Optional[Any] = image_processing(__magic_name__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
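# Context for the four-channel tester above (a sketch of the assumed
# do_convert_rgb behaviour, and assuming PIL is available): an RGBA input
# collapses to three channels before normalization, which is why three output
# channels are expected.
_rgba_example = Image.fromarray(np.zeros((8, 8, 4), dtype=np.uint8))
assert _rgba_example.mode == "RGBA" and _rgba_example.convert("RGB").mode == "RGB"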
| 681 | 1 |
from collections import deque
from .hash_table import HashTable
class HashTableWithLinkedList( HashTable):
    def __init__( self , *args , **kwargs ):
        super().__init__(*args , **kwargs )
    def _set_value( self , key , data ):
        self.values[key] = deque([] ) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data )
        self._keys[key] = self.values[key]
    def balanced_factor( self ):
        return (
            sum(self.charge_factor - len(slot ) for slot in self.values )
            / self.size_table
            * self.charge_factor
        )
    def _collision_resolution( self , key , data=None ):
        if not (
            len(self.values[key] ) == self.charge_factor and self.values.count(None ) == 0
        ):
            return key
        return super()._collision_resolution(key , data )
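# Behaviour sketch for the deque-backed slots above (HashTable internals are
# assumed): the newest insertion sits at the front of each slot.
_slot_example = deque([])
_slot_example.appendleft("first")
_slot_example.appendleft("second")
assert list(_slot_example) == ["second", "first"]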
| 681 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    def __init__( self , parent , batch_size=3 , image_size=3_2 , num_channels=3 , embeddings_size=1_0 , hidden_sizes=[1_0, 2_0, 3_0, 4_0] , depths=[1, 1, 2, 1] , is_training=True , use_labels=True , hidden_act="relu" , num_labels=3 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths )
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        return ResNetConfig(
            num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
    def create_and_check_model( self , config , pixel_values , labels ):
        model = TFResNetModel(config=config )
        result = model(pixel_values )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config )
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_tf
class TFResNetModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase):
    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"""feature-extraction""": TFResNetModel, """image-classification""": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )
_UpperCAmelCase : Optional[Any] = False
_UpperCAmelCase : Optional[Any] = False
_UpperCAmelCase : Dict = False
_UpperCAmelCase : List[Any] = False
_UpperCAmelCase : Any = False
def UpperCamelCase__ ( self ):
        self.model_tester = TFResNetModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ResNetConfig , has_text_modality=False )
def UpperCamelCase__ ( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase__ ( self ):
return
@unittest.skip(reason="""ResNet does not use inputs_embeds""" )
def UpperCamelCase__ ( self ):
pass
@unittest.skip(reason="""ResNet does not support input and output embeddings""" )
def UpperCamelCase__ ( self ):
pass
def UpperCamelCase__ ( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
def UpperCamelCase__ ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
def UpperCamelCase__ ( self ):
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )
            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["""basic""", """bottleneck"""]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["""output_hidden_states"""] = True
                check_hidden_states_output(inputs_dict , config , model_class )
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict , config , model_class )
def UpperCamelCase__ ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@slow
def UpperCamelCase__ ( self ):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img ( ):
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_tf
@require_vision
class TFResNetModelIntegrationTest( unittest.TestCase):
@cached_property
def UpperCamelCase__ ( self ):
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def UpperCamelCase__ ( self ):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""tf""" )
        # forward pass
        outputs = model(**inputs )
        # verify the logits
        expected_shape = tf.TensorShape((1, 1_0_0_0) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = tf.constant([-11.1_069, -9.7_877, -8.3_777] )
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , expected_slice , atol=1e-4 ) )
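        # Informal note on the expected values above: ResNet downsamples by a
        # total stride of 32, so a (B, C, H, W) input produces last hidden
        # states of spatial size (H // 32, W // 32); the 1000-way logits match
        # the ImageNet-1k head of the pretrained checkpoint.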
| 681 | 1 |
import json
import os
import unittest
from typing import Tuple
from transformers import WavaVecaPhonemeCTCTokenizer
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class WavaVecaPhonemeCTCTokenizerTest( TokenizerTesterMixin , unittest.TestCase):
    tokenizer_class = WavaVecaPhonemeCTCTokenizer
_UpperCAmelCase : int = False
def UpperCamelCase__ ( self ):
super().setUp()
        vocab = (
"""<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː """
"""ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː """
"""ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 """
"""oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ """
"""pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ """
"""yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ """
"""əʊ S ɡʲ onɡ2 u\" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ """
"""ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ """
"""ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ """
"""uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ """
"""ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ """
"""ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ """
"""ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4"""
).split(""" """ )
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        self.special_tokens_map = {"""pad_token""": """<pad>""", """unk_token""": """<unk>""", """bos_token""": """<s>""", """eos_token""": """</s>"""}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + """\n""" )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__=False , __magic_name__=2_0 , __magic_name__=5 ):
lowerCamelCase : Optional[int] = [(i, tokenizer.decode([i] , clean_up_tokenization_spaces=__magic_name__ )) for i in range(len(__magic_name__ ) )]
lowerCamelCase : Dict = list(filter(lambda __magic_name__ : [t[0]] == tokenizer.encode(t[1] , do_phonemize=__magic_name__ ) , __magic_name__ ) )
if max_length is not None and len(__magic_name__ ) > max_length:
lowerCamelCase : Any = toks[:max_length]
if min_length is not None and len(__magic_name__ ) < min_length and len(__magic_name__ ) > 0:
while len(__magic_name__ ) < min_length:
lowerCamelCase : int = toks + toks
# toks_str = [t[1] for t in toks]
lowerCamelCase : Tuple = [t[0] for t in toks]
# Ensure consistency
lowerCamelCase : List[Any] = tokenizer.decode(__magic_name__ , clean_up_tokenization_spaces=__magic_name__ )
if " " not in output_txt and len(__magic_name__ ) > 1:
lowerCamelCase : Tuple = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=__magic_name__ )
+ """ """
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=__magic_name__ )
)
if with_prefix_space:
lowerCamelCase : int = """ """ + output_txt
lowerCamelCase : List[str] = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
return output_txt, output_ids
def UpperCamelCase__ ( self , **__magic_name__ ):
kwargs.update(self.special_tokens_map )
return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname , **__magic_name__ )
    def test_tokenizer_add_new_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")

        # check adding a single token
        tokenizer.add_tokens("xxx")
        token_ids = tokenizer("m xxx ɪ", do_phonemize=False).input_ids
        self.assertEqual(token_ids, [13, 392, 17])  # xxx should be last token

        tokenizer.add_tokens(["aaa", "bbb", "ccc"])
        token_ids = tokenizer("m aaa ɪ ccc", do_phonemize=False).input_ids
        self.assertEqual(token_ids, [13, 393, 17, 395])  # aaa and ccc should be after xxx and 2 after aaa

        token_ids = tokenizer("maɪ c", do_phonemize=False).input_ids
        self.assertEqual(token_ids, [3, 200])  # mai should be <unk> (=3)
    def test_phonemize(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(phonemes, "h ə l oʊ h aʊ ɑːɹ j uː")

    def test_encode(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(tokenizer(input_text).input_ids, tokenizer(phonemes, do_phonemize=False).input_ids)

    def test_encode_decode(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids)
        self.assertEqual(phonemes, phonemes_enc_dec)
    def test_decode(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")

        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
            [24, 22, 5, 24, 22, 5, 77],
        ]
        tokens = tokenizer.decode(sample_ids[0])
        batch_tokens = tokenizer.batch_decode(sample_ids)
        self.assertEqual(tokens, batch_tokens[0])
        self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"])
    def test_phonemize_with_word_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(phonemes, "h ə l oʊ | h aʊ | ɑːɹ | j uː |")

    def test_encode_with_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(tokenizer(input_text).input_ids, tokenizer(phonemes, do_phonemize=False).input_ids)
    def test_decode_with_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        # fmt: off
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
            [tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
        ]
        # fmt: on

        # decode with word_del_token filter
        tokens = tokenizer.decode(sample_ids[0])
        batch_tokens = tokenizer.batch_decode(sample_ids)
        self.assertEqual(tokens, batch_tokens[0])
        self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"])

        # decode with no word_del_token filter
        tokens = tokenizer.decode(sample_ids[0], filter_word_delimiter_token=False)
        batch_tokens = tokenizer.batch_decode(sample_ids, filter_word_delimiter_token=False)
        self.assertEqual(tokens, batch_tokens[0])
        self.assertEqual(batch_tokens, ["k s ɾ | ɾ l | ɭʲ", "| j ð | s j ð s oːɹ"])
    def test_encode_decode_with_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids, filter_word_delimiter_token=False)
        self.assertEqual(phonemes, phonemes_enc_dec)

    def test_encode_decode_with_del_filter(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids, filter_word_delimiter_token=True)
        self.assertEqual(" ".join([p.strip() for p in phonemes.split(" |")]).strip(), phonemes_enc_dec)
    def test_change_phonemizer_lang(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token=None
        )
        input_text = "Hello how are you"

        input_ids_en = tokenizer(input_text, phonemizer_lang="en-us").input_ids
        input_ids_fr = tokenizer(input_text, phonemizer_lang="fr-fr").input_ids
        self.assertNotEqual(input_ids_en, input_ids_fr)

        text_en = tokenizer.decode(input_ids_en)
        text_fr = tokenizer.decode(input_ids_fr)
        self.assertEqual(text_en, "h ə l oʊ h aʊ ɑːɹ j uː")
        self.assertEqual(text_fr, "ɛ l o h aʊ a ʁ j u")
    def test_case_insensitive(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        input_text_up = "Hello how Are you"
        input_text_low = "hello how are you"
        input_ids_up = tokenizer(input_text_up).input_ids
        input_ids_low = tokenizer(input_text_low).input_ids
        self.assertEqual(input_ids_up, input_ids_low)
    def test_tokenizer_decode_added_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        tokenizer.add_tokens(["!", "?"])
        tokenizer.add_special_tokens({"cls_token": "$$$"})

        # fmt: off
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394],
            [24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 394, 394],
        ]
        # fmt: on

        batch_tokens = tokenizer.batch_decode(sample_ids)
        self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ!?!? $$$", "j ð s j ð s oːɹ $$$"])
    @staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list
    def test_offsets(self):
        tokenizer = self.get_tokenizer(word_delimiter_token="|")
        tokenizer.add_tokens("|")

        # fmt: off
        # ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ"
        sample_ids = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
        # fmt: on

        outputs = tokenizer.decode(sample_ids, output_char_offsets=True, filter_word_delimiter_token=False)
        # check Wav2Vec2CTCTokenizerOutput keys for char
        self.assertEqual(len(outputs.keys()), 2)
        self.assertTrue("text" in outputs)
        self.assertTrue("char_offsets" in outputs)
        self.assertTrue(isinstance(outputs, Wav2Vec2PhonemeCTCTokenizerOutput))

        # check that order of chars is correct and identical for both outputs
        self.assertEqual(" ".join(self.get_from_offsets(outputs["char_offsets"], "char")), outputs.text)
        self.assertListEqual(
            self.get_from_offsets(outputs["char_offsets"], "char"), ["k", "s", "ɾ", "ɾ", "|", "ɾ", "l", "|", "ɭʲ"]
        )

        # check that offsets are actually correct for char
        # 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
        # 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
        self.assertListEqual(
            self.get_from_offsets(outputs["char_offsets"], "start_offset"), [0, 1, 4, 7, 9, 11, 12, 15, 16]
        )
        self.assertListEqual(
            self.get_from_offsets(outputs["char_offsets"], "end_offset"), [1, 4, 6, 9, 10, 12, 15, 16, 17]
        )
    def test_offsets_batch(self):
        tokenizer = self.get_tokenizer(word_delimiter_token="|")

        def check_list_tuples_equal(outputs_batch, outputs_list):
            self.assertTrue(isinstance(outputs_batch, Wav2Vec2PhonemeCTCTokenizerOutput))
            self.assertTrue(isinstance(outputs_list[0], Wav2Vec2PhonemeCTCTokenizerOutput))

            # transform list to ModelOutput
            outputs_batch_2 = Wav2Vec2PhonemeCTCTokenizerOutput(
                {k: [d[k] for d in outputs_list] for k in outputs_list[0]}
            )
            self.assertListEqual(outputs_batch["text"], outputs_batch_2["text"])

            def recursive_check(list_or_dict_1, list_or_dict_2):
                if isinstance(list_or_dict_1, list):
                    [recursive_check(l1, l2) for l1, l2 in zip(list_or_dict_1, list_or_dict_2)]
                self.assertEqual(list_or_dict_1, list_or_dict_2)

            if "char_offsets" in outputs_batch:
                recursive_check(outputs_batch["char_offsets"], outputs_batch_2["char_offsets"])

        # fmt: off
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
            [24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
        ]
        # fmt: on

        # We assume that `decode` works as expected. All we will check now is
        # the output type is correct and the output is identical to `decode`

        # char
        outputs_char_batch = tokenizer.batch_decode(sample_ids, output_char_offsets=True)
        outputs_char = [tokenizer.decode(ids, output_char_offsets=True) for ids in sample_ids]
        check_list_tuples_equal(outputs_char_batch, outputs_char)
@unittest.skip("""Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes""" )
def UpperCamelCase__ ( self ):
pass
@unittest.skip("""Wav2Vec2PhonemeTokenizer always puts spaces between phonemes""" )
def UpperCamelCase__ ( self ):
pass
@unittest.skip("""encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency""" )
def UpperCamelCase__ ( self ):
pass
@unittest.skip("""Wav2Vec2PhonemeModel has no max model length => no testing""" )
def UpperCamelCase__ ( self ):
pass
    def test_add_tokens_tokenizer(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)

                self.assertNotEqual(vocab_size, 0)

                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)

                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)

                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))

                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)
                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)

                new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)

                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))

                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False
                )
                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)
@unittest.skip("""The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.""" )
def UpperCamelCase__ ( self ):
pass
@unittest.skip("""The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.""" )
def UpperCamelCase__ ( self ):
pass
    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests assumes that the output of `convert_tokens_to_string` is a string which
        # is not the case for Wav2Vec2PhonemeCTCTokenizer.
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["ð", "ɪ", "s", "ɪ", "z", "ɐ", "t", "ɛ", "k", "s", "t"]
                output = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(output["text"], str)
| 681 |
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
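
# Example invocation (hypothetical local paths, shown for illustration only):
#   python <this script> \
#       --tf_checkpoint_path ./mobilebert/mobilebert_variables.ckpt \
#       --mobilebert_config_file ./mobilebert/config.json \
#       --pytorch_dump_path ./mobilebert_pytorch/pytorch_model.bin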
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--mobilebert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained MobileBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
| 681 | 1 |
from __future__ import annotations
import math
class SegmentTree:
    def __init__(self, size):
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx):
        return idx * 2

    def right(self, idx):
        return idx * 2 + 1

    def build(self, idx, left_element, right_element, a):
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)])

    def update(self, idx, left_element, right_element, a, b, val):
        # assign `val` on the (1-based, inclusive) range [a, b]
        if self.flag[idx] is True:
            # push the pending assignment down one level before touching this node
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True

        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)])
        return True

    def query(self, idx, left_element, right_element, a, b):
        # return the maximum on the (1-based, inclusive) range [a, b]
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self):
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])
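
# Minimal usage sketch (1-based, inclusive [a, b] bounds, as in the demo below):
#   segt = SegmentTree(5)
#   segt.build(1, 1, 5, [3, 1, 4, 1, 5])
#   segt.query(1, 1, 5, 2, 4)      # -> 4, the max of [1, 4, 1]
#   segt.update(1, 1, 5, 2, 3, 9)  # lazily assign 9 on positions 2..3
#   segt.query(1, 1, 5, 1, 5)      # -> 9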
if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
    segt.build(1, 1, size, A)
    print(segt.query(1, 1, size, 4, 6))
    print(segt.query(1, 1, size, 7, 11))
    print(segt.query(1, 1, size, 7, 12))
    segt.update(1, 1, size, 1, 3, 111)
    print(segt.query(1, 1, size, 1, 15))
    segt.update(1, 1, size, 7, 8, 235)
    print(segt)
| 681 |
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key(name):
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace("img_encoder.pos_embed", "vision_model.embeddings.position_embeddings")
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace("img_encoder.patch_embed.proj", "vision_model.embeddings.patch_embeddings.projection")
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace("img_encoder.patch_embed.norm", "vision_model.embeddings.layernorm")
    if "img_encoder.layers" in name:
        name = name.replace("img_encoder.layers", "vision_model.encoder.stages")
    if "blocks" in name and "res" not in name:
        name = name.replace("blocks", "layers")
    if "attn" in name and "pre_assign" not in name:
        name = name.replace("attn", "self_attn")
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace("proj", "out_proj")
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace("pre_assign_attn.attn.proj", "pre_assign_attn.attn.out_proj")
    if "norm1" in name:
        name = name.replace("norm1", "layer_norm1")
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace("norm2", "layer_norm2")
    if "img_encoder.norm" in name:
        name = name.replace("img_encoder.norm", "vision_model.layernorm")
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace("text_encoder.token_embedding", "text_model.embeddings.token_embedding")
    if "text_encoder.positional_embedding" in name:
        name = name.replace("text_encoder.positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace("text_encoder.transformer.resblocks.", "text_model.encoder.layers.")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if "text_encoder" in name:
        name = name.replace("text_encoder", "text_model")
    if "ln_final" in name:
        name = name.replace("ln_final", "final_layer_norm")
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace("img_projector.linear_hidden.", "visual_projection.")
    if "img_projector.linear_out." in name:
        name = name.replace("img_projector.linear_out.", "visual_projection.3.")
    if "text_projector.linear_hidden" in name:
        name = name.replace("text_projector.linear_hidden", "text_projection")
    if "text_projector.linear_out" in name:
        name = name.replace("text_projector.linear_out", "text_projection.3")
    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            stage_num, layer_num = int(key_split[2]), int(key_split[4])
            dim = config.vision_config.hidden_size
            if "weight" in key:
                orig_state_dict[f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                orig_state_dict[f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.text_config.hidden_size
            if "weight" in key:
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        else:
            new_name = rename_key(key)
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val

    return orig_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_groupvit_checkpoint(checkpoint_path, pytorch_dump_folder_path, model_name="groupvit-gcc-yfcc", push_to_hub=False):
    config = GroupViTConfig()
    model = GroupViTModel(config).eval()

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys) == 0)

    # verify result
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = prepare_img()
    inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)

    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3523, 6.3629]])
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1873, 8.6230]])
    else:
        raise ValueError(f"Model name {model_name} not supported.")
    assert torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3)

    processor.save_pretrained(pytorch_dump_folder_path)
    model.save_pretrained(pytorch_dump_folder_path)
    print("Successfully saved processor and model to", pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        processor.push_to_hub(model_name, organization="nielsr")
        model.push_to_hub(model_name, organization="nielsr")
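
# Example invocation (hypothetical local checkpoint path; the two supported
# model names come from the branches above):
#   python <this script> --checkpoint_path ./group_vit_gcc_yfcc.pth \
#       --pytorch_dump_folder_path ./groupvit-gcc-yfcc --model_name groupvit-gcc-yfcc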
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to dump the processor and PyTorch model."
    )
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to GroupViT checkpoint")
    parser.add_argument(
        "--model_name",
        default="groupvit-gcc-yfcc",
        type=str,
        help="Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.",
    )
    args = parser.parse_args()
    convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 681 | 1 |
from __future__ import annotations
def is_palindrome(n: int | str) -> bool:
    n = str(n)
    return n == n[::-1]


def solution(n: int = 1000000) -> int:
    # sum all numbers below n that are palindromic in both base 10 and base 2
    total = 0
    for i in range(1, n):
        if is_palindrome(i) and is_palindrome(bin(i).split("b")[1]):
            total += i
    return total
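
# Example: 585 is counted, since "585" reads the same backwards and
# bin(585) == "0b1001001001", which is also palindromic.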
if __name__ == "__main__":
    print(solution(int(str(input().strip()))))
| 681 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class KarrasVeSchedulerState:
    # setable values
    num_inference_steps: Optional[int] = None
    timesteps: Optional[jnp.ndarray] = None
    schedule: Optional[jnp.ndarray] = None  # sigma(t_i)

    @classmethod
    def create(cls):
        return cls()


@dataclass
class FlaxKarrasVeOutput(BaseOutput):
    prev_sample: jnp.ndarray
    derivative: jnp.ndarray
    state: KarrasVeSchedulerState


class FlaxKarrasVeScheduler(FlaxSchedulerMixin, ConfigMixin):
    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(self, sigma_min: float = 0.02, sigma_max: float = 100, s_noise: float = 1.007, s_churn: float = 80, s_min: float = 0.05, s_max: float = 50):
        pass

    def create_state(self):
        return KarrasVeSchedulerState.create()

    def set_timesteps(self, state, num_inference_steps, shape=()):
        timesteps = jnp.arange(0, num_inference_steps)[::-1].copy()
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]
        return state.replace(
            num_inference_steps=num_inference_steps,
            schedule=jnp.array(schedule, dtype=jnp.float32),
            timesteps=timesteps,
        )

    def add_noise_to_input(self, state, sample, sigma, key):
        # stochastic "churn": temporarily raise the noise level before each step
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        key = random.split(key, num=1)
        eps = self.config.s_noise * random.normal(key=key, shape=sample.shape)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step(self, state, model_output, sigma_hat, sigma_prev, sample_hat, return_dict=True):
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def step_correct(self, state, model_output, sigma_hat, sigma_prev, sample_hat, sample_prev, derivative, return_dict=True):
        # second-order (Heun-style) correction of the Euler step above
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def add_noise(self, state, original_samples, noise, timesteps):
        raise NotImplementedError()
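
# Rough usage sketch (pipeline wiring omitted; `sample`, `sigma` and the PRNG
# `key` are assumed to come from the surrounding denoising loop):
#   scheduler = FlaxKarrasVeScheduler()
#   state = scheduler.create_state()
#   state = scheduler.set_timesteps(state, num_inference_steps=50)
#   sample_hat, sigma_hat = scheduler.add_noise_to_input(state, sample, sigma, key)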
| 681 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
device = torch.device("cpu")
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def get_expected_output(swiftformer_name):
    if swiftformer_name == "swiftformer_xs":
        return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01])
    elif swiftformer_name == "swiftformer_s":
        return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01])
    elif swiftformer_name == "swiftformer_l1":
        return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02])
    elif swiftformer_name == "swiftformer_l3":
        return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02])
def rename_key(dct, old, new):
    item = dct.pop(old)
    dct[new] = item
def create_rename_keys(state_dict):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint

    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits

    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}")
    hf_model.save_pretrained(pytorch_dump_folder_path)
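
# Example invocation (hypothetical checkpoint path; valid model names are the
# four choices declared in the CLI below):
#   python <this script> --swiftformer_name swiftformer_xs \
#       --pytorch_dump_folder_path ./converted_outputs/ --original_ckpt ./swiftformer_xs.pth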
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swiftformer_name",
        default="swiftformer_xs",
        choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"],
        type=str,
        help="Name of the SwiftFormer model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="./converted_outputs/",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.")
    args = parser.parse_args()
    convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 681 |
from itertools import product
from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros
def gen_gaussian_kernel(k_size, sigma):
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g
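
# Note: the kernel samples the 2-D Gaussian G(x, y) = C * exp(-(x^2 + y^2) / (2 * sigma^2)).
# The leading constant (1/(2*pi*sigma) here, vs. the textbook 1/(2*pi*sigma**2))
# only rescales overall brightness; the shape of the kernel is what matters
# for the smoothing.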
def gaussian_filter(image, k_size, sigma):
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1

    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1

    # turn the kernel into shape(k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)

    # reshape and get the dst image
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8)
    return dst
if __name__ == "__main__":
    # read original image
    img = imread(r"../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # get values with two different mask size
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)

    # show result images
    imshow("gaussian filter with 3x3 mask", gaussian3x3)
    imshow("gaussian filter with 5x5 mask", gaussian5x5)
    waitKey()
| 681 | 1 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"module.blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"module.blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"module.blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append((f"module.blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"module.blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"module.blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("module.cls_token", "vit.embeddings.cls_token"),
            ("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("module.pos_embed", "vit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("module.norm.weight", "layernorm.weight"),
                ("module.norm.bias", "layernorm.bias"),
            ]
        )

        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def remove_projection_head(state_dict):
    # projection head is used in the self-supervised pre-training in MSN,
    # for downstream task it's not needed.
    ignore_keys = [
        "module.fc.fc1.weight",
        "module.fc.fc1.bias",
        "module.fc.bn1.weight",
        "module.fc.bn1.bias",
        "module.fc.bn1.running_mean",
        "module.fc.bn1.running_var",
        "module.fc.bn1.num_batches_tracked",
        "module.fc.fc2.weight",
        "module.fc.fc2.bias",
        "module.fc.bn2.weight",
        "module.fc.bn2.bias",
        "module.fc.bn2.running_mean",
        "module.fc.bn2.running_var",
        "module.fc.bn2.num_batches_tracked",
        "module.fc.fc3.weight",
        "module.fc.fc3.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMSNConfig()
    config.num_labels = 1000

    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1

    model = ViTMSNModel(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"]

    image_processor = ViTImageProcessor(size=config.image_size)

    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)

    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)

    model.load_state_dict(state_dict)
    model.eval()

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD
    )
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state

    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]])
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]])
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]])
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]])
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]])

    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
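
# Example invocation (the checkpoint URL is the ViT-S/16 MSN default declared
# below; the output directory is hypothetical):
#   python <this script> \
#       --checkpoint_url https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar \
#       --pytorch_dump_folder_path ./vit-msn-small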
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar",
        type=str,
        help="URL of the checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    args = parser.parse_args()
    convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 681 |
import pytest
_lowerCamelCase ="""__dummy_dataset1__"""
_lowerCamelCase ="""
import json
import os
import datasets
REPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"
URLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}
class __DummyDataset1__(datasets.GeneratorBasedBuilder):
def _info(self):
features = datasets.Features(
{
\"tokens\": datasets.Sequence(datasets.Value(\"string\")),
\"ner_tags\": datasets.Sequence(
datasets.features.ClassLabel(
names=[
\"O\",
\"B-PER\",
\"I-PER\",
\"B-ORG\",
\"I-ORG\",
\"B-LOC\",
\"I-LOC\",
]
)
),
\"langs\": datasets.Sequence(datasets.Value(\"string\")),
\"spans\": datasets.Sequence(datasets.Value(\"string\")),
}
)
return datasets.DatasetInfo(features=features)
def _split_generators(self, dl_manager):
dl_path = dl_manager.download(URLS)
return [
datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),
datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),
]
def _generate_examples(self, filepath):
with open(filepath, \"r\", encoding=\"utf-8\") as f:
for i, line in enumerate(f):
yield i, json.loads(line)
"""
@pytest.fixture
def dataset_loading_script_name():
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code():
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
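
# A test can then simply request the fixture by name, e.g. (hypothetical test,
# not part of this file):
#   def test_dummy_dataset_script(dataset_loading_script_dir):
#       ds = datasets.load_dataset(dataset_loading_script_dir, split="train")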
| 681 | 1 |
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
_lowerCamelCase =argparse.ArgumentParser("""Stable Diffusion script with intel optimization""", add_help=False)
parser.add_argument("""--dpm""", action="""store_true""", help="""Enable DPMSolver or not""")
parser.add_argument("""--steps""", default=None, type=int, help="""Num inference steps""")
_lowerCamelCase =parser.parse_args()
_lowerCamelCase ="""cpu"""
_lowerCamelCase ="""a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"""
_lowerCamelCase ="""path-to-your-trained-model"""
_lowerCamelCase =StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
_lowerCamelCase =DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
_lowerCamelCase =pipe.to(device)
# to channels last
_lowerCamelCase =pipe.unet.to(memory_format=torch.channels_last)
_lowerCamelCase =pipe.vae.to(memory_format=torch.channels_last)
_lowerCamelCase =pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
_lowerCamelCase =pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
_lowerCamelCase =torch.randn(2, 4, 6_4, 6_4)
_lowerCamelCase =torch.rand(1) * 9_9_9
_lowerCamelCase =torch.randn(2, 7_7, 7_6_8)
_lowerCamelCase =(sample, timestep, encoder_hidden_status)
try:
_lowerCamelCase =ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example)
except Exception:
_lowerCamelCase =ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True)
_lowerCamelCase =ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True)
_lowerCamelCase =ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True)
if pipe.requires_safety_checker:
_lowerCamelCase =ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True)
# compute
_lowerCamelCase =6_6_6
_lowerCamelCase =torch.Generator(device).manual_seed(seed)
_lowerCamelCase ={"""generator""": generator}
if args.steps is not None:
_lowerCamelCase =args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa):
_lowerCamelCase =pipe(prompt, **generate_kwargs).images[0]
# save image
image.save("""generated.png""")
| 681 |
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
    PIL_INTERPOLATION = {
        "linear": PIL.Image.Resampling.BILINEAR,
        "bilinear": PIL.Image.Resampling.BILINEAR,
        "bicubic": PIL.Image.Resampling.BICUBIC,
        "lanczos": PIL.Image.Resampling.LANCZOS,
        "nearest": PIL.Image.Resampling.NEAREST,
    }
else:
    PIL_INTERPOLATION = {
        "linear": PIL.Image.LINEAR,
        "bilinear": PIL.Image.BILINEAR,
        "bicubic": PIL.Image.BICUBIC,
        "lanczos": PIL.Image.LANCZOS,
        "nearest": PIL.Image.NEAREST,
    }
def pt_to_pil(images):
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
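
# Usage sketch: `pt_to_pil` expects a float tensor in [-1, 1] with shape
# (batch, channels, height, width) and returns a list of PIL images, e.g.:
#   pil_images = pt_to_pil(torch.randn(2, 3, 64, 64).clamp(-1, 1))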
| 681 | 1 |
import re
def indian_phone_validator(phone: str) -> bool:
    pat = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$")
    if match := re.search(pat, phone):
        return match.string == phone
    return False
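
# Illustrative cases for the pattern above:
#   indian_phone_validator("+91 9876543210")  # True
#   indian_phone_validator("9876543210")      # True
#   indian_phone_validator("12345")           # False (wrong length/prefix)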
if __name__ == "__main__":
    print(indian_phone_validator("+918827897895"))
| 681 |
from typing import Optional
from torch import nn
from .transformer_2d import Transformer2DModel, Transformer2DModelOutput
class DualTransformer2DModel(nn.Module):
    def __init__(self, num_attention_heads: int = 16, attention_head_dim: int = 88, in_channels: Optional[int] = None, num_layers: int = 1, dropout: float = 0.0, norm_num_groups: int = 32, cross_attention_dim: Optional[int] = None, attention_bias: bool = False, sample_size: Optional[int] = None, num_vector_embeds: Optional[int] = None, activation_fn: str = "geglu", num_embeds_ada_norm: Optional[int] = None):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(
                    num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim, in_channels=in_channels, num_layers=num_layers, dropout=dropout, norm_num_groups=norm_num_groups, cross_attention_dim=cross_attention_dim, attention_bias=attention_bias, sample_size=sample_size, num_vector_embeds=num_vector_embeds, activation_fn=activation_fn, num_embeds_ada_norm=num_embeds_ada_norm
                )
                for _ in range(2)
            ]
        )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]
    def forward(self, hidden_states, encoder_hidden_states, timestep=None, attention_mask=None, cross_attention_kwargs=None, return_dict: bool = True):
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states, encoder_hidden_states=condition_state, timestep=timestep, cross_attention_kwargs=cross_attention_kwargs, return_dict=False
            )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return Transformer2DModelOutput(sample=output_states)
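
# Note: a pipeline is expected to set `mix_ratio`, `condition_lengths` and
# `transformer_index_for_condition` before calling `forward`. For example
# (hypothetical value), `model.mix_ratio = 0.7` weights the first encoded
# condition's residual at 0.7 and the second at 0.3 in the blend above.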
| 681 | 1 |
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
    require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass

    def load_image(_):
        return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
    "https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"
)
@is_pipeline_test
@require_torch
@require_vision
class DocumentQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def UpperCamelCase__ ( self , model , tokenizer , image_processor ):
dqa_pipeline = pipeline(
"""document-question-answering""" , model=model , tokenizer=tokenizer , image_processor=image_processor )
image = INVOICE_URL
word_boxes = list(zip(*apply_tesseract(load_image(image ) , None , """""" ) ) )
question = """What is the placebo?"""
examples = [
{
"""image""": load_image(__magic_name__ ),
"""question""": question,
},
{
"""image""": image,
"""question""": question,
},
{
"""image""": image,
"""question""": question,
"""word_boxes""": word_boxes,
},
]
return dqa_pipeline, examples
def UpperCamelCase__ ( self , dqa_pipeline , examples ):
outputs = dqa_pipeline(examples , top_k=2 )
self.assertEqual(
outputs , [
[
{"""score""": ANY(__magic_name__ ), """answer""": ANY(__magic_name__ ), """start""": ANY(__magic_name__ ), """end""": ANY(__magic_name__ )},
{"""score""": ANY(__magic_name__ ), """answer""": ANY(__magic_name__ ), """start""": ANY(__magic_name__ ), """end""": ANY(__magic_name__ )},
]
]
* 3 , )
@require_torch
@require_detectron2
@require_pytesseract
def UpperCamelCase__ ( self ):
lowerCamelCase : Any = pipeline("""document-question-answering""" , model="""hf-internal-testing/tiny-random-layoutlmv2""" )
lowerCamelCase : Dict = INVOICE_URL
lowerCamelCase : Tuple = """How many cats are there?"""
lowerCamelCase : Tuple = [
{"""score""": 0.0_001, """answer""": """oy 2312/2019""", """start""": 3_8, """end""": 3_9},
{"""score""": 0.0_001, """answer""": """oy 2312/2019 DUE""", """start""": 3_8, """end""": 4_0},
]
lowerCamelCase : List[Any] = dqa_pipeline(image=__magic_name__ , question=__magic_name__ , top_k=2 )
self.assertEqual(nested_simplify(__magic_name__ , decimals=4 ) , __magic_name__ )
lowerCamelCase : Any = dqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
self.assertEqual(nested_simplify(__magic_name__ , decimals=4 ) , __magic_name__ )
# No text is detected in this image, so layoutlmv2 is expected to fail
# and return an empty answer.
lowerCamelCase : Union[str, Any] = """./tests/fixtures/tests_samples/COCO/000000039769.png"""
lowerCamelCase : Any = dqa_pipeline(image=__magic_name__ , question=__magic_name__ , top_k=2 )
self.assertEqual(__magic_name__ , [] )
# We can optionally pass the words and bounding boxes directly
lowerCamelCase : Optional[Any] = """./tests/fixtures/tests_samples/COCO/000000039769.png"""
lowerCamelCase : List[Any] = []
lowerCamelCase : List[str] = []
lowerCamelCase : Dict = dqa_pipeline(image=__magic_name__ , question=__magic_name__ , words=__magic_name__ , boxes=__magic_name__ , top_k=2 )
self.assertEqual(__magic_name__ , [] )
@slow
@require_torch
@require_detectron2
@require_pytesseract
def UpperCamelCase__ ( self ):
lowerCamelCase : List[Any] = pipeline(
"""document-question-answering""" , model="""tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa""" , revision="""9977165""" , )
lowerCamelCase : Tuple = INVOICE_URL
lowerCamelCase : Optional[Any] = """What is the invoice number?"""
lowerCamelCase : Optional[int] = dqa_pipeline(image=__magic_name__ , question=__magic_name__ , top_k=2 )
self.assertEqual(
nested_simplify(__magic_name__ , decimals=4 ) , [
{"""score""": 0.9_944, """answer""": """us-001""", """start""": 1_6, """end""": 1_6},
{"""score""": 0.0_009, """answer""": """us-001""", """start""": 1_6, """end""": 1_6},
] , )
lowerCamelCase : Dict = dqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(__magic_name__ , decimals=4 ) , [
{"""score""": 0.9_944, """answer""": """us-001""", """start""": 1_6, """end""": 1_6},
{"""score""": 0.0_009, """answer""": """us-001""", """start""": 1_6, """end""": 1_6},
] , )
lowerCamelCase : str = dqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 )
self.assertEqual(
nested_simplify(__magic_name__ , decimals=4 ) , [
[
{"""score""": 0.9_944, """answer""": """us-001""", """start""": 1_6, """end""": 1_6},
{"""score""": 0.0_009, """answer""": """us-001""", """start""": 1_6, """end""": 1_6},
],
]
* 2 , )
@slow
@require_torch
@require_detectron2
@require_pytesseract
def UpperCamelCase__ ( self ):
lowerCamelCase : List[str] = pipeline(
"""document-question-answering""" , model="""tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa""" , revision="""9977165""" , max_seq_len=5_0 , )
lowerCamelCase : Tuple = INVOICE_URL
lowerCamelCase : List[Any] = """What is the invoice number?"""
lowerCamelCase : List[Any] = dqa_pipeline(image=__magic_name__ , question=__magic_name__ , top_k=2 )
self.assertEqual(
nested_simplify(__magic_name__ , decimals=4 ) , [
{"""score""": 0.9_974, """answer""": """1110212019""", """start""": 2_3, """end""": 2_3},
{"""score""": 0.9_948, """answer""": """us-001""", """start""": 1_6, """end""": 1_6},
] , )
lowerCamelCase : Dict = dqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(__magic_name__ , decimals=4 ) , [
{"""score""": 0.9_974, """answer""": """1110212019""", """start""": 2_3, """end""": 2_3},
{"""score""": 0.9_948, """answer""": """us-001""", """start""": 1_6, """end""": 1_6},
] , )
lowerCamelCase : int = dqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 )
self.assertEqual(
nested_simplify(__magic_name__ , decimals=4 ) , [
[
{"""score""": 0.9_974, """answer""": """1110212019""", """start""": 2_3, """end""": 2_3},
{"""score""": 0.9_948, """answer""": """us-001""", """start""": 1_6, """end""": 1_6},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def UpperCamelCase__ ( self ):
lowerCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained(
"""impira/layoutlm-document-qa""" , revision="""3dc6de3""" , add_prefix_space=__magic_name__ )
lowerCamelCase : Optional[int] = pipeline(
"""document-question-answering""" , model="""impira/layoutlm-document-qa""" , tokenizer=__magic_name__ , revision="""3dc6de3""" , )
lowerCamelCase : Optional[Any] = INVOICE_URL
lowerCamelCase : Tuple = """What is the invoice number?"""
lowerCamelCase : Tuple = dqa_pipeline(image=__magic_name__ , question=__magic_name__ , top_k=2 )
self.assertEqual(
nested_simplify(__magic_name__ , decimals=4 ) , [
{"""score""": 0.4_251, """answer""": """us-001""", """start""": 1_6, """end""": 1_6},
{"""score""": 0.0_819, """answer""": """1110212019""", """start""": 2_3, """end""": 2_3},
] , )
lowerCamelCase : List[Any] = dqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(__magic_name__ , decimals=4 ) , [
{"""score""": 0.4_251, """answer""": """us-001""", """start""": 1_6, """end""": 1_6},
{"""score""": 0.0_819, """answer""": """1110212019""", """start""": 2_3, """end""": 2_3},
] , )
lowerCamelCase : str = dqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 )
self.assertEqual(
nested_simplify(__magic_name__ , decimals=4 ) , [
[
{"""score""": 0.4_251, """answer""": """us-001""", """start""": 1_6, """end""": 1_6},
{"""score""": 0.0_819, """answer""": """1110212019""", """start""": 2_3, """end""": 2_3},
]
]
* 2 , )
word_boxes = list(zip(*apply_tesseract(load_image(image ) , None , """""" ) ) )
# This model should also work if `image` is set to None
lowerCamelCase : str = dqa_pipeline({"""image""": None, """word_boxes""": word_boxes, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(__magic_name__ , decimals=4 ) , [
{"""score""": 0.4_251, """answer""": """us-001""", """start""": 1_6, """end""": 1_6},
{"""score""": 0.0_819, """answer""": """1110212019""", """start""": 2_3, """end""": 2_3},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def UpperCamelCase__ ( self ):
lowerCamelCase : Dict = AutoTokenizer.from_pretrained(
"""impira/layoutlm-document-qa""" , revision="""3dc6de3""" , add_prefix_space=__magic_name__ )
lowerCamelCase : int = pipeline(
"""document-question-answering""" , model="""impira/layoutlm-document-qa""" , tokenizer=__magic_name__ , revision="""3dc6de3""" , max_seq_len=5_0 , )
lowerCamelCase : Optional[Any] = INVOICE_URL
lowerCamelCase : Union[str, Any] = """What is the invoice number?"""
lowerCamelCase : int = dqa_pipeline(image=__magic_name__ , question=__magic_name__ , top_k=2 )
self.assertEqual(
nested_simplify(__magic_name__ , decimals=4 ) , [
{"""score""": 0.9_999, """answer""": """us-001""", """start""": 1_6, """end""": 1_6},
{"""score""": 0.9_998, """answer""": """us-001""", """start""": 1_6, """end""": 1_6},
] , )
lowerCamelCase : Optional[Any] = dqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 )
self.assertEqual(
nested_simplify(__magic_name__ , decimals=4 ) , [
[
{"""score""": 0.9_999, """answer""": """us-001""", """start""": 1_6, """end""": 1_6},
{"""score""": 0.9_998, """answer""": """us-001""", """start""": 1_6, """end""": 1_6},
]
]
* 2 , )
word_boxes = list(zip(*apply_tesseract(load_image(image ) , None , """""" ) ) )
# This model should also work if `image` is set to None
lowerCamelCase : List[Any] = dqa_pipeline({"""image""": None, """word_boxes""": word_boxes, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(__magic_name__ , decimals=4 ) , [
{"""score""": 0.9_999, """answer""": """us-001""", """start""": 1_6, """end""": 1_6},
{"""score""": 0.9_998, """answer""": """us-001""", """start""": 1_6, """end""": 1_6},
] , )
@slow
@require_torch
def UpperCamelCase__ ( self ):
lowerCamelCase : List[Any] = pipeline(
"""document-question-answering""" , model="""naver-clova-ix/donut-base-finetuned-docvqa""" , tokenizer=AutoTokenizer.from_pretrained("""naver-clova-ix/donut-base-finetuned-docvqa""" ) , feature_extractor="""naver-clova-ix/donut-base-finetuned-docvqa""" , )
lowerCamelCase : Any = INVOICE_URL
lowerCamelCase : int = """What is the invoice number?"""
lowerCamelCase : Tuple = dqa_pipeline(image=__magic_name__ , question=__magic_name__ , top_k=2 )
self.assertEqual(nested_simplify(__magic_name__ , decimals=4 ) , [{"""answer""": """us-001"""}] )
@require_tf
@unittest.skip("""Document question answering not implemented in TF""" )
def UpperCamelCase__ ( self ):
pass
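# Minimal standalone sketch mirroring the slow tests above (assumes hub access; the
# checkpoint is one exercised by the tests):
if __name__ == "__main__":
dqa_pipeline = pipeline("""document-question-answering""" , model="""impira/layoutlm-document-qa""" )
print(dqa_pipeline(image=INVOICE_URL , question="""What is the invoice number?""" , top_k=1 ) )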
| 681 |
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCamelCase ="""▁"""
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
class A__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase):
_UpperCAmelCase : str = BertGenerationTokenizer
_UpperCAmelCase : Tuple = False
_UpperCAmelCase : List[Any] = True
def UpperCamelCase__ ( self ):
super().setUp()
tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB , keep_accents=True )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase__ ( self ):
lowerCamelCase : List[str] = """<s>"""
lowerCamelCase : Dict = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__magic_name__ ) , __magic_name__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__magic_name__ ) , __magic_name__ )
def UpperCamelCase__ ( self ):
vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<unk>""" )
self.assertEqual(vocab_keys[1] , """<s>""" )
self.assertEqual(vocab_keys[-1] , """<pad>""" )
self.assertEqual(len(vocab_keys ) , 1_0_0_2 )
def UpperCamelCase__ ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_0 )
def UpperCamelCase__ ( self ):
tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB , keep_accents=True )
lowerCamelCase : Optional[Any] = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__magic_name__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__magic_name__ ) , [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2] , )
lowerCamelCase : Any = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__magic_name__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
lowerCamelCase : Optional[Any] = tokenizer.convert_tokens_to_ids(__magic_name__ )
self.assertListEqual(
__magic_name__ , [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4] , )
lowerCamelCase : int = tokenizer.convert_ids_to_tokens(__magic_name__ )
self.assertListEqual(
__magic_name__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
def UpperCamelCase__ ( self ):
return BertGenerationTokenizer.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
@slow
def UpperCamelCase__ ( self ):
lowerCamelCase : List[Any] = """Hello World!"""
lowerCamelCase : Any = [1_8_5_3_6, 2_2_6_0, 1_0_1]
self.assertListEqual(__magic_name__ , self.big_tokenizer.encode(__magic_name__ ) )
@slow
def UpperCamelCase__ ( self ):
lowerCamelCase : str = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
)
lowerCamelCase : str = [
8_7_1,
4_1_9,
3_5_8,
9_4_6,
9_9_1,
2_5_2_1,
4_5_2,
3_5_8,
1_3_5_7,
3_8_7,
7_7_5_1,
3_5_3_6,
1_1_2,
9_8_5,
4_5_6,
1_2_6,
8_6_5,
9_3_8,
5_4_0_0,
5_7_3_4,
4_5_8,
1_3_6_8,
4_6_7,
7_8_6,
2_4_6_2,
5_2_4_6,
1_1_5_9,
6_3_3,
8_6_5,
4_5_1_9,
4_5_7,
5_8_2,
8_5_2,
2_5_5_7,
4_2_7,
9_1_6,
5_0_8,
4_0_5,
3_4_3_2_4,
4_9_7,
3_9_1,
4_0_8,
1_1_3_4_2,
1_2_4_4,
3_8_5,
1_0_0,
9_3_8,
9_8_5,
4_5_6,
5_7_4,
3_6_2,
1_2_5_9_7,
3_2_0_0,
3_1_2_9,
1_1_7_2,
]
self.assertListEqual(__magic_name__ , self.big_tokenizer.encode(__magic_name__ ) )
@require_torch
@slow
def UpperCamelCase__ ( self ):
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
lowerCamelCase : Union[str, Any] = list(self.big_tokenizer.get_vocab().keys() )[:1_0]
lowerCamelCase : Dict = """ """.join(__magic_name__ )
lowerCamelCase : Any = self.big_tokenizer.encode_plus(__magic_name__ , return_tensors="""pt""" , return_token_type_ids=__magic_name__ )
lowerCamelCase : List[str] = self.big_tokenizer.batch_encode_plus(
[sequence + """ """ + sequence] , return_tensors="""pt""" , return_token_type_ids=__magic_name__ )
lowerCamelCase : Tuple = BertGenerationConfig()
lowerCamelCase : Optional[int] = BertGenerationEncoder(__magic_name__ )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**__magic_name__ )
model(**__magic_name__ )
@slow
def UpperCamelCase__ ( self ):
# fmt: off
lowerCamelCase : Any = {"""input_ids""": [[3_9_2_8_6, 4_5_8, 3_6_3_3_5, 2_0_0_1, 4_5_6, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 7_7_4_6, 1_7_4_1, 1_1_1_5_7, 3_9_1, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 3_9_6_7, 3_5_4_1_2, 1_1_3, 4_9_3_6, 1_0_9, 3_8_7_0, 2_3_7_7, 1_1_3, 3_0_0_8_4, 4_5_7_2_0, 4_5_8, 1_3_4, 1_7_4_9_6, 1_1_2, 5_0_3, 1_1_6_7_2, 1_1_3, 1_1_8, 1_1_2, 5_6_6_5, 1_3_3_4_7, 3_8_6_8_7, 1_1_2, 1_4_9_6, 3_1_3_8_9, 1_1_2, 3_2_6_8, 4_7_2_6_4, 1_3_4, 9_6_2, 1_1_2, 1_6_3_7_7, 8_0_3_5, 2_3_1_3_0, 4_3_0, 1_2_1_6_9, 1_5_5_1_8, 2_8_5_9_2, 4_5_8, 1_4_6, 4_1_6_9_7, 1_0_9, 3_9_1, 1_2_1_6_9, 1_5_5_1_8, 1_6_6_8_9, 4_5_8, 1_4_6, 4_1_3_5_8, 1_0_9, 4_5_2, 7_2_6, 4_0_3_4, 1_1_1, 7_6_3, 3_5_4_1_2, 5_0_8_2, 3_8_8, 1_9_0_3, 1_1_1, 9_0_5_1, 3_9_1, 2_8_7_0, 4_8_9_1_8, 1_9_0_0, 1_1_2_3, 5_5_0, 9_9_8, 1_1_2, 9_5_8_6, 1_5_9_8_5, 4_5_5, 3_9_1, 4_1_0, 2_2_9_5_5, 3_7_6_3_6, 1_1_4], [4_4_8, 1_7_4_9_6, 4_1_9, 3_6_6_3, 3_8_5, 7_6_3, 1_1_3, 2_7_5_3_3, 2_8_7_0, 3_2_8_3, 1_3_0_4_3, 1_6_3_9, 2_4_7_1_3, 5_2_3, 6_5_6, 2_4_0_1_3, 1_8_5_5_0, 2_5_2_1, 5_1_7, 2_7_0_1_4, 2_1_2_4_4, 4_2_0, 1_2_1_2, 1_4_6_5, 3_9_1, 9_2_7, 4_8_3_3, 3_8_8, 5_7_8, 1_1_7_8_6, 1_1_4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_8_4, 2_1_6_9, 7_6_8_7, 2_1_9_3_2, 1_8_1_4_6, 7_2_6, 3_6_3, 1_7_0_3_2, 3_3_9_1, 1_1_4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__magic_name__ , model_name="""google/bert_for_seq_generation_L-24_bbc_encoder""" , revision="""c817d1fd1be2ffa69431227a1fe320544943d4db""" , )
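# Minimal standalone sketch of the tokenizer under test (assumes hub access; the
# expected ids mirror the `Hello World!` slow test above):
if __name__ == "__main__":
big_tokenizer = BertGenerationTokenizer.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
print(big_tokenizer.encode("""Hello World!""" ) )  # [18536, 2260, 101]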
| 681 | 1 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
_lowerCamelCase ={
"""huggingface/informer-tourism-monthly""": (
"""https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"""
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : str = """informer"""
_UpperCAmelCase : Optional[Any] = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
"""num_hidden_layers""": """encoder_layers""",
}
def __init__( self , __magic_name__ = None , __magic_name__ = None , __magic_name__ = "student_t" , __magic_name__ = "nll" , __magic_name__ = 1 , __magic_name__ = None , __magic_name__ = "mean" , __magic_name__ = 0 , __magic_name__ = 0 , __magic_name__ = 0 , __magic_name__ = 0 , __magic_name__ = None , __magic_name__ = None , __magic_name__ = 6_4 , __magic_name__ = 3_2 , __magic_name__ = 3_2 , __magic_name__ = 2 , __magic_name__ = 2 , __magic_name__ = 2 , __magic_name__ = 2 , __magic_name__ = True , __magic_name__ = "gelu" , __magic_name__ = 0.05 , __magic_name__ = 0.1 , __magic_name__ = 0.1 , __magic_name__ = 0.1 , __magic_name__ = 0.1 , __magic_name__ = 1_0_0 , __magic_name__ = 0.02 , __magic_name__=True , __magic_name__ = "prob" , __magic_name__ = 5 , __magic_name__ = True , **__magic_name__ , ):
# time series specific configuration
lowerCamelCase : List[str] = prediction_length
lowerCamelCase : str = context_length or prediction_length
lowerCamelCase : str = distribution_output
lowerCamelCase : List[Any] = loss
lowerCamelCase : int = input_size
lowerCamelCase : Tuple = num_time_features
lowerCamelCase : List[Any] = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
lowerCamelCase : List[Any] = scaling
lowerCamelCase : int = num_dynamic_real_features
lowerCamelCase : Union[str, Any] = num_static_real_features
lowerCamelCase : Optional[int] = num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(__magic_name__ ) != num_static_categorical_features:
raise ValueError(
"""The cardinality should be a list of the same length as `num_static_categorical_features`""" )
lowerCamelCase : str = cardinality
else:
lowerCamelCase : str = [0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(__magic_name__ ) != num_static_categorical_features:
raise ValueError(
"""The embedding dimension should be a list of the same length as `num_static_categorical_features`""" )
lowerCamelCase : Any = embedding_dimension
else:
lowerCamelCase : List[Any] = [min(5_0 , (cat + 1) // 2 ) for cat in self.cardinality]
lowerCamelCase : List[str] = num_parallel_samples
# Transformer architecture configuration
lowerCamelCase : Tuple = input_size * len(self.lags_sequence ) + self._number_of_features
lowerCamelCase : List[Any] = d_model
lowerCamelCase : str = encoder_attention_heads
lowerCamelCase : int = decoder_attention_heads
lowerCamelCase : str = encoder_ffn_dim
lowerCamelCase : str = decoder_ffn_dim
lowerCamelCase : Dict = encoder_layers
lowerCamelCase : str = decoder_layers
lowerCamelCase : Any = dropout
lowerCamelCase : str = attention_dropout
lowerCamelCase : Optional[Any] = activation_dropout
lowerCamelCase : Union[str, Any] = encoder_layerdrop
lowerCamelCase : Any = decoder_layerdrop
lowerCamelCase : Optional[Any] = activation_function
lowerCamelCase : str = init_std
lowerCamelCase : str = use_cache
# Informer
lowerCamelCase : Dict = attention_type
lowerCamelCase : Dict = sampling_factor
lowerCamelCase : List[str] = distil
super().__init__(is_encoder_decoder=__magic_name__ , **__magic_name__ )
@property
def UpperCamelCase__ ( self ):
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
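# Minimal usage sketch against the upstream API this file mirrors (the body above uses
# the parameter names of transformers' InformerConfig):
#
# from transformers import InformerConfig
# config = InformerConfig(prediction_length=24, context_length=48, input_size=1, num_time_features=2)
# print(config.d_model, config.attention_type)  # 64, "prob" per the defaults above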
| 681 |
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()
# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)
# Config: "scale_attn_by_inverse_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
"""vocab_size""": len(tokenizer),
"""scale_attn_by_inverse_layer_idx""": True,
"""reorder_and_upcast_attn""": True,
}
# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)
# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
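# Example invocation (a sketch; the script filename is assumed, and the flag names
# follow the `args.*` attributes used above):
#
#   python initialize_model.py \
#       --config_name gpt2-large \
#       --tokenizer_name codeparrot/codeparrot \
#       --model_name codeparrot-model \
#       --push_to_hub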
| 681 | 1 |
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
"""The `inpainting.py` script is outdated. Please use directly `from diffusers import"""
""" StableDiffusionInpaintPipeline` instead."""
)
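# The import path recommended by the warning above (the checkpoint name is illustrative):
#
#   from diffusers import StableDiffusionInpaintPipeline
#   pipe = StableDiffusionInpaintPipeline.from_pretrained("runwayml/stable-diffusion-inpainting")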
| 681 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class A__ ( unittest.TestCase):
def UpperCamelCase__ ( self , __magic_name__ ):
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["""bs"""] , model_result["""ss"""] ):
lowerCamelCase : List[str] = model_result["""result"""][batch_size][sequence_length]
self.assertIsNotNone(__magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : List[str] = """sshleifer/tiny-gpt2"""
lowerCamelCase : str = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=__magic_name__ , multi_process=__magic_name__ , )
lowerCamelCase : Dict = TensorFlowBenchmark(__magic_name__ )
lowerCamelCase : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase__ ( self ):
lowerCamelCase : Any = """sgugger/tiny-distilbert-classification"""
lowerCamelCase : Optional[int] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , only_pretrain_model=__magic_name__ , )
lowerCamelCase : List[Any] = TensorFlowBenchmark(__magic_name__ )
lowerCamelCase : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[int] = """sshleifer/tiny-gpt2"""
lowerCamelCase : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , )
lowerCamelCase : Any = TensorFlowBenchmark(__magic_name__ )
lowerCamelCase : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase__ ( self ):
lowerCamelCase : List[Any] = """sshleifer/tiny-gpt2"""
lowerCamelCase : Tuple = AutoConfig.from_pretrained(__magic_name__ )
lowerCamelCase : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=__magic_name__ , multi_process=__magic_name__ , )
lowerCamelCase : Optional[Any] = TensorFlowBenchmark(__magic_name__ , [config] )
lowerCamelCase : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase__ ( self ):
lowerCamelCase : Tuple = """sshleifer/tiny-gpt2"""
lowerCamelCase : Union[str, Any] = AutoConfig.from_pretrained(__magic_name__ )
lowerCamelCase : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , )
lowerCamelCase : Union[str, Any] = TensorFlowBenchmark(__magic_name__ , [config] )
lowerCamelCase : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[int] = """sshleifer/tiny-gpt2"""
lowerCamelCase : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , )
lowerCamelCase : int = TensorFlowBenchmark(__magic_name__ )
lowerCamelCase : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def UpperCamelCase__ ( self ):
lowerCamelCase : int = """sshleifer/tiny-gpt2"""
lowerCamelCase : Tuple = AutoConfig.from_pretrained(__magic_name__ )
lowerCamelCase : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , )
lowerCamelCase : Any = TensorFlowBenchmark(__magic_name__ , [config] )
lowerCamelCase : str = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def UpperCamelCase__ ( self ):
lowerCamelCase : str = """patrickvonplaten/t5-tiny-random"""
lowerCamelCase : Tuple = AutoConfig.from_pretrained(__magic_name__ )
lowerCamelCase : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , )
lowerCamelCase : List[Any] = TensorFlowBenchmark(__magic_name__ , configs=[config] )
lowerCamelCase : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , """Cannot do xla on CPU.""" )
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[Any] = """sshleifer/tiny-gpt2"""
lowerCamelCase : Dict = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , use_xla=__magic_name__ , multi_process=__magic_name__ , )
lowerCamelCase : int = TensorFlowBenchmark(__magic_name__ )
lowerCamelCase : str = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[int] = """sshleifer/tiny-gpt2"""
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCamelCase : List[str] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=__magic_name__ , save_to_csv=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(__magic_name__ , """inf_time.csv""" ) , inference_memory_csv_file=os.path.join(__magic_name__ , """inf_mem.csv""" ) , env_info_csv_file=os.path.join(__magic_name__ , """env.csv""" ) , multi_process=__magic_name__ , )
lowerCamelCase : List[str] = TensorFlowBenchmark(__magic_name__ )
benchmark.run()
self.assertTrue(Path(os.path.join(__magic_name__ , """inf_time.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(__magic_name__ , """inf_mem.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(__magic_name__ , """env.csv""" ) ).exists() )
def UpperCamelCase__ ( self ):
lowerCamelCase : str = """sshleifer/tiny-gpt2"""
def _check_summary_is_not_empty(__magic_name__ ):
self.assertTrue(hasattr(__magic_name__ , """sequential""" ) )
self.assertTrue(hasattr(__magic_name__ , """cumulative""" ) )
self.assertTrue(hasattr(__magic_name__ , """current""" ) )
self.assertTrue(hasattr(__magic_name__ , """total""" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCamelCase : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(__magic_name__ , """log.txt""" ) , log_print=__magic_name__ , trace_memory_line_by_line=__magic_name__ , eager_mode=__magic_name__ , multi_process=__magic_name__ , )
lowerCamelCase : Tuple = TensorFlowBenchmark(__magic_name__ )
lowerCamelCase : Union[str, Any] = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(__magic_name__ , """log.txt""" ) ).exists() )
| 681 | 1 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
_lowerCamelCase ={
"""SenseTime/deformable-detr""": """https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json""",
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : int = """deformable_detr"""
_UpperCAmelCase : Optional[int] = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self , __magic_name__=True , __magic_name__=None , __magic_name__=3 , __magic_name__=3_0_0 , __magic_name__=1_0_2_4 , __magic_name__=6 , __magic_name__=1_0_2_4 , __magic_name__=8 , __magic_name__=6 , __magic_name__=1_0_2_4 , __magic_name__=8 , __magic_name__=0.0 , __magic_name__=True , __magic_name__="relu" , __magic_name__=2_5_6 , __magic_name__=0.1 , __magic_name__=0.0 , __magic_name__=0.0 , __magic_name__=0.02 , __magic_name__=1.0 , __magic_name__=True , __magic_name__=False , __magic_name__="sine" , __magic_name__="resnet50" , __magic_name__=True , __magic_name__=False , __magic_name__=4 , __magic_name__=4 , __magic_name__=4 , __magic_name__=False , __magic_name__=3_0_0 , __magic_name__=False , __magic_name__=1 , __magic_name__=5 , __magic_name__=2 , __magic_name__=1 , __magic_name__=1 , __magic_name__=5 , __magic_name__=2 , __magic_name__=0.1 , __magic_name__=0.25 , __magic_name__=False , **__magic_name__ , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
lowerCamelCase : Optional[Any] = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
elif isinstance(__magic_name__ , __magic_name__ ):
lowerCamelCase : Tuple = backbone_config.get("""model_type""" )
lowerCamelCase : Union[str, Any] = CONFIG_MAPPING[backbone_model_type]
lowerCamelCase : Tuple = config_class.from_dict(__magic_name__ )
lowerCamelCase : Any = use_timm_backbone
lowerCamelCase : List[str] = backbone_config
lowerCamelCase : Optional[int] = num_channels
lowerCamelCase : List[str] = num_queries
lowerCamelCase : Dict = max_position_embeddings
lowerCamelCase : Optional[Any] = d_model
lowerCamelCase : Optional[Any] = encoder_ffn_dim
lowerCamelCase : Any = encoder_layers
lowerCamelCase : str = encoder_attention_heads
lowerCamelCase : Optional[Any] = decoder_ffn_dim
lowerCamelCase : Optional[int] = decoder_layers
lowerCamelCase : Any = decoder_attention_heads
lowerCamelCase : Tuple = dropout
lowerCamelCase : Any = attention_dropout
lowerCamelCase : Any = activation_dropout
lowerCamelCase : Optional[Any] = activation_function
lowerCamelCase : Tuple = init_std
lowerCamelCase : Tuple = init_xavier_std
lowerCamelCase : str = encoder_layerdrop
lowerCamelCase : Optional[int] = auxiliary_loss
lowerCamelCase : Dict = position_embedding_type
lowerCamelCase : Any = backbone
lowerCamelCase : Any = use_pretrained_backbone
lowerCamelCase : Tuple = dilation
# deformable attributes
lowerCamelCase : Optional[int] = num_feature_levels
lowerCamelCase : int = encoder_n_points
lowerCamelCase : Any = decoder_n_points
lowerCamelCase : int = two_stage
lowerCamelCase : Optional[int] = two_stage_num_proposals
lowerCamelCase : Any = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError("""If two_stage is True, with_box_refine must be True.""" )
# Hungarian matcher
lowerCamelCase : Dict = class_cost
lowerCamelCase : Tuple = bbox_cost
lowerCamelCase : Tuple = giou_cost
# Loss coefficients
lowerCamelCase : Tuple = mask_loss_coefficient
lowerCamelCase : str = dice_loss_coefficient
lowerCamelCase : Any = bbox_loss_coefficient
lowerCamelCase : str = giou_loss_coefficient
lowerCamelCase : Dict = eos_coefficient
lowerCamelCase : str = focal_alpha
lowerCamelCase : Optional[int] = disable_custom_kernels
super().__init__(is_encoder_decoder=__magic_name__ , **__magic_name__ )
@property
def UpperCamelCase__ ( self ):
return self.encoder_attention_heads
@property
def UpperCamelCase__ ( self ):
return self.d_model
def UpperCamelCase__ ( self ):
lowerCamelCase : Tuple = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
lowerCamelCase : Any = self.backbone_config.to_dict()
lowerCamelCase : List[Any] = self.__class__.model_type
return output
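# Minimal usage sketch against the upstream API this file mirrors (upstream this is
# transformers' DeformableDetrConfig):
#
# from transformers import DeformableDetrConfig
# config = DeformableDetrConfig(num_queries=300, two_stage=False)
# print(config.num_attention_heads, config.hidden_size)  # 8, 256 via the attribute_map above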
| 681 |
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two(x ):
return x + 2
class A__ ( unittest.TestCase):
def UpperCamelCase__ ( self ):
lowerCamelCase : List[Any] = """x = 3"""
lowerCamelCase : Tuple = {}
lowerCamelCase : List[str] = evaluate(__magic_name__ , {} , state=__magic_name__ )
assert result == 3
self.assertDictEqual(__magic_name__ , {"""x""": 3} )
lowerCamelCase : Optional[int] = """x = y"""
lowerCamelCase : Tuple = {"""y""": 5}
lowerCamelCase : Tuple = evaluate(__magic_name__ , {} , state=__magic_name__ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(__magic_name__ , {"""x""": 5, """y""": 5} )
def UpperCamelCase__ ( self ):
lowerCamelCase : List[str] = """y = add_two(x)"""
lowerCamelCase : List[Any] = {"""x""": 3}
lowerCamelCase : Union[str, Any] = evaluate(__magic_name__ , {"""add_two""": add_two} , state=__magic_name__ )
assert result == 5
self.assertDictEqual(__magic_name__ , {"""x""": 3, """y""": 5} )
# Won't work without the tool
with CaptureStdout() as out:
lowerCamelCase : Union[str, Any] = evaluate(__magic_name__ , {} , state=__magic_name__ )
assert result is None
assert "tried to execute add_two" in out.out
def UpperCamelCase__ ( self ):
lowerCamelCase : int = """x = 3"""
lowerCamelCase : Dict = {}
lowerCamelCase : Tuple = evaluate(__magic_name__ , {} , state=__magic_name__ )
assert result == 3
self.assertDictEqual(__magic_name__ , {"""x""": 3} )
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[Any] = """test_dict = {'x': x, 'y': add_two(x)}"""
lowerCamelCase : Optional[int] = {"""x""": 3}
lowerCamelCase : Tuple = evaluate(__magic_name__ , {"""add_two""": add_two} , state=__magic_name__ )
self.assertDictEqual(__magic_name__ , {"""x""": 3, """y""": 5} )
self.assertDictEqual(__magic_name__ , {"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}} )
def UpperCamelCase__ ( self ):
lowerCamelCase : Tuple = """x = 3\ny = 5"""
lowerCamelCase : Optional[int] = {}
lowerCamelCase : Union[str, Any] = evaluate(__magic_name__ , {} , state=__magic_name__ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(__magic_name__ , {"""x""": 3, """y""": 5} )
def UpperCamelCase__ ( self ):
lowerCamelCase : Tuple = """text = f'This is x: {x}.'"""
lowerCamelCase : Optional[int] = {"""x""": 3}
lowerCamelCase : Optional[int] = evaluate(__magic_name__ , {} , state=__magic_name__ )
# evaluate returns the value of the last assignment.
assert result == "This is x: 3."
self.assertDictEqual(__magic_name__ , {"""x""": 3, """text""": """This is x: 3."""} )
def UpperCamelCase__ ( self ):
lowerCamelCase : Tuple = """if x <= 3:\n y = 2\nelse:\n y = 5"""
lowerCamelCase : Tuple = {"""x""": 3}
lowerCamelCase : int = evaluate(__magic_name__ , {} , state=__magic_name__ )
# evaluate returns the value of the last assignment.
assert result == 2
self.assertDictEqual(__magic_name__ , {"""x""": 3, """y""": 2} )
lowerCamelCase : Tuple = {"""x""": 8}
lowerCamelCase : Dict = evaluate(__magic_name__ , {} , state=__magic_name__ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(__magic_name__ , {"""x""": 8, """y""": 5} )
def UpperCamelCase__ ( self ):
lowerCamelCase : Dict = """test_list = [x, add_two(x)]"""
lowerCamelCase : List[Any] = {"""x""": 3}
lowerCamelCase : List[str] = evaluate(__magic_name__ , {"""add_two""": add_two} , state=__magic_name__ )
self.assertListEqual(__magic_name__ , [3, 5] )
self.assertDictEqual(__magic_name__ , {"""x""": 3, """test_list""": [3, 5]} )
def UpperCamelCase__ ( self ):
lowerCamelCase : str = """y = x"""
lowerCamelCase : List[Any] = {"""x""": 3}
lowerCamelCase : Any = evaluate(__magic_name__ , {} , state=__magic_name__ )
assert result == 3
self.assertDictEqual(__magic_name__ , {"""x""": 3, """y""": 3} )
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[int] = """test_list = [x, add_two(x)]\ntest_list[1]"""
lowerCamelCase : Any = {"""x""": 3}
lowerCamelCase : List[str] = evaluate(__magic_name__ , {"""add_two""": add_two} , state=__magic_name__ )
assert result == 5
self.assertDictEqual(__magic_name__ , {"""x""": 3, """test_list""": [3, 5]} )
lowerCamelCase : Any = """test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"""
lowerCamelCase : Dict = {"""x""": 3}
lowerCamelCase : Any = evaluate(__magic_name__ , {"""add_two""": add_two} , state=__magic_name__ )
assert result == 5
self.assertDictEqual(__magic_name__ , {"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}} )
def UpperCamelCase__ ( self ):
lowerCamelCase : Union[str, Any] = """x = 0\nfor i in range(3):\n x = i"""
lowerCamelCase : int = {}
lowerCamelCase : Union[str, Any] = evaluate(__magic_name__ , {"""range""": range} , state=__magic_name__ )
assert result == 2
self.assertDictEqual(__magic_name__ , {"""x""": 2, """i""": 2} )
| 681 | 1 |
from __future__ import annotations
def carrier_concentration(electron_conc, hole_conc, intrinsic_conc ):
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("""You cannot supply more or less than 2 values""" )
elif electron_conc < 0:
raise ValueError("""Electron concentration cannot be negative in a semiconductor""" )
elif hole_conc < 0:
raise ValueError("""Hole concentration cannot be negative in a semiconductor""" )
elif intrinsic_conc < 0:
raise ValueError(
"""Intrinsic concentration cannot be negative in a semiconductor""" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
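# Quick sanity check of the mass action law (n * p = n_i**2) implemented above:
print(carrier_concentration(electron_conc=25 , hole_conc=100 , intrinsic_conc=0 ) )  # ('intrinsic_conc', 50.0)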
| 681 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
_lowerCamelCase ={
"""edbeeching/decision-transformer-gym-hopper-medium""": (
"""https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"""
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Optional[int] = """decision_transformer"""
_UpperCAmelCase : str = ["""past_key_values"""]
_UpperCAmelCase : Any = {
"""max_position_embeddings""": """n_positions""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self , __magic_name__=1_7 , __magic_name__=4 , __magic_name__=1_2_8 , __magic_name__=4_0_9_6 , __magic_name__=True , __magic_name__=1 , __magic_name__=1_0_2_4 , __magic_name__=3 , __magic_name__=1 , __magic_name__=None , __magic_name__="relu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=1e-5 , __magic_name__=0.02 , __magic_name__=True , __magic_name__=True , __magic_name__=5_0_2_5_6 , __magic_name__=5_0_2_5_6 , __magic_name__=False , __magic_name__=False , **__magic_name__ , ):
lowerCamelCase : Optional[int] = state_dim
lowerCamelCase : int = act_dim
lowerCamelCase : int = hidden_size
lowerCamelCase : Union[str, Any] = max_ep_len
lowerCamelCase : Optional[int] = action_tanh
lowerCamelCase : Any = vocab_size
lowerCamelCase : List[str] = n_positions
lowerCamelCase : List[Any] = n_layer
lowerCamelCase : Dict = n_head
lowerCamelCase : Optional[Any] = n_inner
lowerCamelCase : Tuple = activation_function
lowerCamelCase : Tuple = resid_pdrop
lowerCamelCase : str = embd_pdrop
lowerCamelCase : Dict = attn_pdrop
lowerCamelCase : Tuple = layer_norm_epsilon
lowerCamelCase : Tuple = initializer_range
lowerCamelCase : Tuple = scale_attn_weights
lowerCamelCase : str = use_cache
lowerCamelCase : List[Any] = scale_attn_by_inverse_layer_idx
lowerCamelCase : List[str] = reorder_and_upcast_attn
lowerCamelCase : Optional[Any] = bos_token_id
lowerCamelCase : str = eos_token_id
super().__init__(bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ )
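# Minimal usage sketch against the upstream API this file mirrors (upstream this is
# transformers' DecisionTransformerConfig):
#
# from transformers import DecisionTransformerConfig
# config = DecisionTransformerConfig(state_dim=17, act_dim=4)  # hopper defaults above
# print(config.hidden_size, config.max_ep_len)  # 128, 4096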
| 681 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_yolos_config(yolos_name ):
config = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
config.hidden_size = 192
config.intermediate_size = 768
config.num_hidden_layers = 12
config.num_attention_heads = 3
config.image_size = [800, 1333]
config.use_mid_position_embeddings = False
elif yolos_name == "yolos_s_dWr":
config.hidden_size = 330
config.num_hidden_layers = 14
config.num_attention_heads = 6
config.intermediate_size = 1320
elif "yolos_s" in yolos_name:
config.hidden_size = 384
config.intermediate_size = 1536
config.num_hidden_layers = 12
config.num_attention_heads = 6
elif "yolos_b" in yolos_name:
config.image_size = [800, 1344]
config.num_labels = 91
repo_id = """huggingface/label-files"""
filename = """coco-detection-id2label.json"""
id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="""dataset""" ), """r""" ) )
id2label = {int(k ): v for k, v in id2label.items()}
config.id2label = id2label
config.label2id = {v: k for k, v in id2label.items()}
return config
def read_in_q_k_v(state_dict, config, base_model = False ):
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
in_proj_weight = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' )
in_proj_bias = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
state_dict[F'''vit.encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[: config.hidden_size, :]
state_dict[F'''vit.encoder.layer.{i}.attention.attention.query.bias'''] = in_proj_bias[: config.hidden_size]
state_dict[F'''vit.encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
state_dict[F'''vit.encoder.layer.{i}.attention.attention.key.bias'''] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
state_dict[F'''vit.encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[-config.hidden_size :, :]
state_dict[F'''vit.encoder.layer.{i}.attention.attention.value.bias'''] = in_proj_bias[-config.hidden_size :]
def rename_key(name ):
if "backbone" in name:
name = name.replace("""backbone""", """vit""" )
if "cls_token" in name:
name = name.replace("""cls_token""", """embeddings.cls_token""" )
if "det_token" in name:
name = name.replace("""det_token""", """embeddings.detection_tokens""" )
if "mid_pos_embed" in name:
name = name.replace("""mid_pos_embed""", """encoder.mid_position_embeddings""" )
if "pos_embed" in name:
name = name.replace("""pos_embed""", """embeddings.position_embeddings""" )
if "patch_embed.proj" in name:
name = name.replace("""patch_embed.proj""", """embeddings.patch_embeddings.projection""" )
if "blocks" in name:
name = name.replace("""blocks""", """encoder.layer""" )
if "attn.proj" in name:
name = name.replace("""attn.proj""", """attention.output.dense""" )
if "attn" in name:
name = name.replace("""attn""", """attention.self""" )
if "norm1" in name:
name = name.replace("""norm1""", """layernorm_before""" )
if "norm2" in name:
name = name.replace("""norm2""", """layernorm_after""" )
if "mlp.fc1" in name:
name = name.replace("""mlp.fc1""", """intermediate.dense""" )
if "mlp.fc2" in name:
name = name.replace("""mlp.fc2""", """output.dense""" )
if "class_embed" in name:
name = name.replace("""class_embed""", """class_labels_classifier""" )
if "bbox_embed" in name:
name = name.replace("""bbox_embed""", """bbox_predictor""" )
if "vit.norm" in name:
name = name.replace("""vit.norm""", """vit.layernorm""" )
return name
def convert_state_dict(orig_state_dict, model ):
for key in orig_state_dict.copy().keys():
val = orig_state_dict.pop(key )
if "qkv" in key:
key_split = key.split(""".""" )
layer_num = int(key_split[2] )
dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
orig_state_dict[F'''vit.encoder.layer.{layer_num}.attention.attention.query.weight'''] = val[:dim, :]
orig_state_dict[F'''vit.encoder.layer.{layer_num}.attention.attention.key.weight'''] = val[
dim : dim * 2, :
]
orig_state_dict[F'''vit.encoder.layer.{layer_num}.attention.attention.value.weight'''] = val[-dim:, :]
else:
orig_state_dict[F'''vit.encoder.layer.{layer_num}.attention.attention.query.bias'''] = val[:dim]
orig_state_dict[F'''vit.encoder.layer.{layer_num}.attention.attention.key.bias'''] = val[dim : dim * 2]
orig_state_dict[F'''vit.encoder.layer.{layer_num}.attention.attention.value.bias'''] = val[-dim:]
else:
orig_state_dict[rename_key(key )] = val
return orig_state_dict
def prepare_img():
url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
im = Image.open(requests.get(url, stream=True ).raw )
return im
@torch.no_grad()
def convert_yolos_checkpoint(yolos_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub = False ):
config = get_yolos_config(yolos_name )
# load original state_dict
state_dict = torch.load(checkpoint_path, map_location="""cpu""" )["""model"""]
# load 🤗 model
model = YolosForObjectDetection(config )
model.eval()
new_state_dict = convert_state_dict(state_dict, model )
model.load_state_dict(new_state_dict )
# Check outputs on an image, prepared by YolosImageProcessor
size = 800 if yolos_name != """yolos_ti""" else 512
image_processor = YolosImageProcessor(format="""coco_detection""", size=size )
encoding = image_processor(images=prepare_img(), return_tensors="""pt""" )
outputs = model(**encoding )
logits, pred_boxes = outputs.logits, outputs.pred_boxes
expected_slice_logits, expected_slice_boxes = None, None
if yolos_name == "yolos_ti":
expected_slice_logits = torch.tensor(
[[-3_9.5_0_2_2, -1_1.9_8_2_0, -1_7.6_8_8_8], [-2_9.9_5_7_4, -9.9_7_6_9, -1_7.7_6_9_1], [-4_2.3_2_8_1, -2_0.7_2_0_0, -3_0.6_2_9_4]] )
expected_slice_boxes = torch.tensor(
[[0.4_0_2_1, 0.0_8_3_6, 0.7_9_7_9], [0.0_1_8_4, 0.2_6_0_9, 0.0_3_6_4], [0.1_7_8_1, 0.2_0_0_4, 0.2_0_9_5]] )
elif yolos_name == "yolos_s_200_pre":
expected_slice_logits = torch.tensor(
[[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] )
expected_slice_boxes = torch.tensor(
[[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] )
elif yolos_name == "yolos_s_300_pre":
expected_slice_logits = torch.tensor(
[[-3_6.2_2_2_0, -1_4.4_3_8_5, -2_3.5_4_5_7], [-3_5.6_9_7_0, -1_4.7_5_8_3, -2_1.3_9_3_5], [-3_1.5_9_3_9, -1_3.6_0_4_2, -1_6.8_0_4_9]] )
expected_slice_boxes = torch.tensor(
[[0.7_6_1_4, 0.2_3_1_6, 0.4_7_2_8], [0.7_1_6_8, 0.4_4_9_5, 0.3_8_5_5], [0.4_9_9_6, 0.1_4_6_6, 0.9_9_9_6]] )
elif yolos_name == "yolos_s_dWr":
expected_slice_logits = torch.tensor(
[[-4_2.8_6_6_8, -2_4.1_0_4_9, -4_1.1_6_9_0], [-3_4.7_4_5_6, -1_4.1_2_7_4, -2_4.9_1_9_4], [-3_3.7_8_9_8, -1_2.1_9_4_6, -2_5.6_4_9_5]] )
expected_slice_boxes = torch.tensor(
[[0.5_5_8_7, 0.2_7_7_3, 0.0_6_0_5], [0.5_0_0_4, 0.3_0_1_4, 0.9_9_9_4], [0.4_9_9_9, 0.1_5_4_8, 0.9_9_9_4]] )
elif yolos_name == "yolos_base":
expected_slice_logits = torch.tensor(
[[-4_0.6_0_6_4, -2_4.3_0_8_4, -3_2.6_4_4_7], [-5_5.1_9_9_0, -3_0.7_7_1_9, -3_5.5_8_7_7], [-5_1.4_3_1_1, -3_3.3_5_0_7, -3_5.6_4_6_2]] )
expected_slice_boxes = torch.tensor(
[[0.5_5_5_5, 0.2_7_9_4, 0.0_6_5_5], [0.9_0_4_9, 0.2_6_6_4, 0.1_8_9_4], [0.9_1_8_3, 0.1_9_8_4, 0.1_6_3_5]] )
else:
raise ValueError(F'''Unknown yolos_name: {yolos_name}''' )
assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4 )
assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4 )
Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
print(F'''Saving model {yolos_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(pytorch_dump_folder_path )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
model_mapping = {
"""yolos_ti""": """yolos-tiny""",
"""yolos_s_200_pre""": """yolos-small""",
"""yolos_s_300_pre""": """yolos-small-300""",
"""yolos_s_dWr""": """yolos-small-dwr""",
"""yolos_base""": """yolos-base""",
}
print("""Pushing to the hub...""" )
model_name = model_mapping[yolos_name]
image_processor.push_to_hub(model_name, organization="""hustvl""" )
model.push_to_hub(model_name, organization="""hustvl""" )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--yolos_name""",
default="""yolos_s_200_pre""",
type=str,
help=(
"""Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"""
""" 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."""
),
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, help="""Path to the original state dict (.pth file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
args = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
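# Example invocation (a sketch; the script filename is assumed, and a checkpoint must
# have been downloaded locally; flags mirror the parser above):
#
#   python convert_yolos_to_pytorch.py \
#       --yolos_name yolos_s_200_pre \
#       --checkpoint_path ./yolos_s_200_pre.pth \
#       --pytorch_dump_folder_path ./yolos-small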
| 681 |
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
logger = logging.get_logger(__name__)
class A__ :
def __init__( self , question_encoder , generator ):
self.question_encoder = question_encoder
self.generator = generator
self.current_tokenizer = self.question_encoder
def UpperCamelCase__ ( self , save_directory ):
if os.path.isfile(save_directory ):
raise ValueError(F'''Provided path ({save_directory}) should be a directory, not a file''' )
os.makedirs(save_directory , exist_ok=True )
question_encoder_path = os.path.join(save_directory , """question_encoder_tokenizer""" )
generator_path = os.path.join(save_directory , """generator_tokenizer""" )
self.question_encoder.save_pretrained(question_encoder_path )
self.generator.save_pretrained(generator_path )
@classmethod
def UpperCamelCase__ ( cls , pretrained_model_name_or_path , **kwargs ):
# dynamically import AutoTokenizer
from ..auto.tokenization_auto import AutoTokenizer
config = kwargs.pop("""config""" , None )
if config is None:
config = RagConfig.from_pretrained(pretrained_model_name_or_path )
question_encoder = AutoTokenizer.from_pretrained(
pretrained_model_name_or_path , config=config.question_encoder , subfolder="""question_encoder_tokenizer""" )
generator = AutoTokenizer.from_pretrained(
pretrained_model_name_or_path , config=config.generator , subfolder="""generator_tokenizer""" )
return cls(question_encoder=question_encoder , generator=generator )
def __call__( self , *__magic_name__ , **__magic_name__ ):
return self.current_tokenizer(*__magic_name__ , **__magic_name__ )
def UpperCamelCase__ ( self , *__magic_name__ , **__magic_name__ ):
return self.generator.batch_decode(*__magic_name__ , **__magic_name__ )
def UpperCamelCase__ ( self , *__magic_name__ , **__magic_name__ ):
return self.generator.decode(*__magic_name__ , **__magic_name__ )
def UpperCamelCase__ ( self ):
self.current_tokenizer = self.question_encoder
def UpperCamelCase__ ( self ):
self.current_tokenizer = self.generator
def UpperCamelCase__ ( self , src_texts , tgt_texts = None , max_length = None , max_target_length = None , padding = "longest" , return_tensors = None , truncation = True , **kwargs , ):
warnings.warn(
"""`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the """
"""regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` """
"""context manager to prepare your targets. See the documentation of your specific tokenizer for more """
"""details""" , __magic_name__ , )
if max_length is None:
max_length = self.current_tokenizer.model_max_length
model_inputs = self(
src_texts , add_special_tokens=True , return_tensors=return_tensors , max_length=max_length , padding=padding , truncation=truncation , **kwargs , )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
lowerCamelCase : int = self.current_tokenizer.model_max_length
lowerCamelCase : Dict = self(
text_target=__magic_name__ , add_special_tokens=__magic_name__ , return_tensors=__magic_name__ , padding=__magic_name__ , max_length=__magic_name__ , truncation=__magic_name__ , **__magic_name__ , )
lowerCamelCase : List[Any] = labels["""input_ids"""]
return model_inputs
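# Minimal usage sketch of the combined tokenizer above. The checkpoint name is a
# published RAG checkpoint, but the snippet itself is illustrative and not part of
# the module:
#
#   tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-base")
#   inputs = tokenizer("who holds the record in 100m freestyle?", return_tensors="pt")
#   tokenizer.save_pretrained("./rag_tokenizer")  # writes both sub-tokenizer folders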
| 681 | 1 |
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")
        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            return text_encoding
        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        if text is not None:
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        else:
            text_encoding = None
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)
        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
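# Illustrative usage of the processor above ("photo.jpg" is a placeholder path; the
# checkpoint name is a published BLIP checkpoint):
#
#   from PIL import Image
#   processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
#   inputs = processor(images=Image.open("photo.jpg"), text="a photo of", return_tensors="pt")
#   # `inputs` holds pixel_values plus the tokenized text fields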
| 681 |
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.array:
    # Decode raw audio bytes with ffmpeg into a mono float32 numpy array.
    ar = f"{sampling_rate}"
    ac = "1"
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio
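# Illustrative use of ffmpeg_read ("sample.mp3" is a placeholder path): decode a
# compressed file into a mono float32 array at 16 kHz.
#
#   with open("sample.mp3", "rb") as f:
#       audio = ffmpeg_read(f.read(), sampling_rate=16000)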
def ffmpeg_microphone(sampling_rate: int, chunk_length_s: float, format_for_conversion: str = "f32le"):
    # Stream raw microphone audio via ffmpeg, yielding byte chunks of one chunk_length_s each.
    ar = f"{sampling_rate}"
    ac = "1"
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")
    system = platform.system()
    if system == "Linux":
        format_ = "alsa"
        input_ = "default"
    elif system == "Darwin":
        format_ = "avfoundation"
        input_ = ":0"
    elif system == "Windows":
        format_ = "dshow"
        input_ = "default"
    ffmpeg_command = [
        "ffmpeg",
        "-f",
        format_,
        "-i",
        input_,
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-fflags",
        "nobuffer",
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
    for item in iterator:
        yield item
def ffmpeg_microphone_live(sampling_rate: int, chunk_length_s: float, stream_chunk_s: Optional[int] = None, stride_length_s: Optional[Union[Tuple[float, float], float]] = None, format_for_conversion: str = "f32le"):
    # Stream microphone audio as overlapping numpy chunks suitable for streaming inference.
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s
    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")
    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]
    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
    # Accumulate raw bytes from `iterator` and yield chunks of `chunk_len` bytes that
    # overlap their neighbors by the (left, right) stride.
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}"
        )
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item
def _ffmpeg_stream(ffmpeg_command, buflen: int):
    # Internal helper: spawn ffmpeg and yield its stdout in `buflen`-sized reads.
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
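# Self-contained check of the chunking logic above (illustrative, not part of the
# module). With chunk_len=4 and stride=(0, 1), consecutive chunks overlap their
# successor by the one-byte right stride:
#
#   chunks = list(chunk_bytes_iter(iter([b"0123456789"]), 4, stride=(0, 1)))
#   # -> raw payloads b"0123", b"3456", b"6789", then the leftover b"9"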
| 681 | 1 |
from __future__ import annotations
arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]


def next_greatest_element_slow(arr: list[float]) -> list[float]:
    # Brute force: for each element, scan everything to its right. O(n^2).
    result = []
    arr_size = len(arr)
    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result


def next_greatest_element_fast(arr: list[float]) -> list[float]:
    # Same brute force, written with enumerate and slicing; still O(n^2) but with
    # lower constant factors.
    result = []
    for i, outer in enumerate(arr):
        next_item: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_item = inner
                break
        result.append(next_item)
    return result


def next_greatest_element(arr: list[float]) -> list[float]:
    # Monotonic stack scanned right-to-left: each element is pushed and popped at
    # most once, so the whole pass is O(n).
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size
    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result


if __name__ == "__main__":
    from doctest import testmod
    from timeit import timeit

    testmod()
    print(next_greatest_element_slow(arr))
    print(next_greatest_element_fast(arr))
    print(next_greatest_element(arr))

    setup = (
        "from __main__ import arr, next_greatest_element_slow, "
        "next_greatest_element_fast, next_greatest_element"
    )
    print(
        "next_greatest_element_slow():",
        timeit("next_greatest_element_slow(arr)", setup=setup),
    )
    print(
        "next_greatest_element_fast():",
        timeit("next_greatest_element_fast(arr)", setup=setup),
    )
    print(
        " next_greatest_element():",
        timeit("next_greatest_element(arr)", setup=setup),
    )
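# Worked example (hand-checked, identical for all three implementations):
#
#   next_greatest_element([2, 7, 3, 5, 1])  ->  [7, -1, 5, -1, -1]
#   # 2 -> 7 (first larger value to its right), 3 -> 5; 7, 5 and 1 have none, so -1.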
| 681 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""")) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""")
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue_model_parallelism.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1600, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1600, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
])
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        # configuration for running training on smdistributed Model Parallel
        mpi_options = {
            "enabled": True,
            "processes_per_host": 8,
        }
        smp_options = {
            "enabled": True,
            "parameters": {
                "microbatches": 4,
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,
                "ddp": True,
            },
        }
        distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}
        name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer"
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={
                **self.env.hyperparameters,
                "model_name_or_path": self.model_name_or_path,
                "max_steps": 500,
            },
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(1,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)
        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
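# These tests only run when the TEST_SAGEMAKER environment variable is set; a
# hypothetical local invocation (the test file path is illustrative) would look like:
#
#   TEST_SAGEMAKER=True python -m pytest -s tests/sagemaker/test_multi_node_model_parallel.py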
| 681 | 1 |