import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)

    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv so the training script sees its own arguments plus the core count.
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
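
# Example invocation (illustrative only -- "my_tpu_script.py" is a hypothetical
# training script; it must expose an `_mp_fn(index)` entry point, since that is
# what xmp.spawn() calls in each spawned process):
#
#     python xla_spawn.py --num_cores 8 my_tpu_script.py --per_device_train_batch_size 8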
"""simple docstring"""
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
    import jax
    import jax.numpy as jnp

    from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor
class FlaxRegNetModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values):
        model = FlaxRegNetModel(config=config)
        result = model(pixel_values)
        # Output shape (b, c, h, w)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()

    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False

    def setUp(self) -> None:
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        outputs = model(**inputs)

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])

        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
    # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.self_attn.out_proj.weight", f"encoder.layers.{i}.self_attn.out_proj.weight")
    )
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias")
    )
    rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias"))
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight")
    )
    rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias"))
    rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias"))
    # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"decoder.layers.{i}.self_attn.out_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias")
    )
    rename_keys.append(
        (
            f"transformer.decoder.layers.{i}.cross_attn.out_proj.weight",
            f"decoder.layers.{i}.encoder_attn.out_proj.weight",
        )
    )
    rename_keys.append(
        (
            f"transformer.decoder.layers.{i}.cross_attn.out_proj.bias",
            f"decoder.layers.{i}.encoder_attn.out_proj.bias",
        )
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias"))
    # q, k, v projections in self/cross-attention in decoder for conditional DETR
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_qcontent_proj.weight", f"decoder.layers.{i}.sa_qcontent_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_kcontent_proj.weight", f"decoder.layers.{i}.sa_kcontent_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_qpos_proj.weight", f"decoder.layers.{i}.sa_qpos_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_kpos_proj.weight", f"decoder.layers.{i}.sa_kpos_proj.weight")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.sa_v_proj.weight", f"decoder.layers.{i}.sa_v_proj.weight"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_qcontent_proj.weight", f"decoder.layers.{i}.ca_qcontent_proj.weight")
    )
    # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_kcontent_proj.weight", f"decoder.layers.{i}.ca_kcontent_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_kpos_proj.weight", f"decoder.layers.{i}.ca_kpos_proj.weight")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.ca_v_proj.weight", f"decoder.layers.{i}.ca_v_proj.weight"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight", f"decoder.layers.{i}.ca_qpos_sine_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_qcontent_proj.bias", f"decoder.layers.{i}.sa_qcontent_proj.bias")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_kcontent_proj.bias", f"decoder.layers.{i}.sa_kcontent_proj.bias")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.sa_qpos_proj.bias", f"decoder.layers.{i}.sa_qpos_proj.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.sa_kpos_proj.bias", f"decoder.layers.{i}.sa_kpos_proj.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.sa_v_proj.bias", f"decoder.layers.{i}.sa_v_proj.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_qcontent_proj.bias", f"decoder.layers.{i}.ca_qcontent_proj.bias")
    )
    # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_kcontent_proj.bias", f"decoder.layers.{i}.ca_kcontent_proj.bias")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.ca_kpos_proj.bias", f"decoder.layers.{i}.ca_kpos_proj.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.ca_v_proj.bias", f"decoder.layers.{i}.ca_v_proj.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias", f"decoder.layers.{i}.ca_qpos_sine_proj.bias")
    )

# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
    [
        ("input_proj.weight", "input_projection.weight"),
        ("input_proj.bias", "input_projection.bias"),
        ("query_embed.weight", "query_position_embeddings.weight"),
        ("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
        ("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
        ("class_embed.weight", "class_labels_classifier.weight"),
        ("class_embed.bias", "class_labels_classifier.bias"),
        ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
        ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
        ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
        ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
        ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
        ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
        ("transformer.decoder.ref_point_head.layers.0.weight", "decoder.ref_point_head.layers.0.weight"),
        ("transformer.decoder.ref_point_head.layers.0.bias", "decoder.ref_point_head.layers.0.bias"),
        ("transformer.decoder.ref_point_head.layers.1.weight", "decoder.ref_point_head.layers.1.weight"),
        ("transformer.decoder.ref_point_head.layers.1.bias", "decoder.ref_point_head.layers.1.bias"),
        ("transformer.decoder.query_scale.layers.0.weight", "decoder.query_scale.layers.0.weight"),
        ("transformer.decoder.query_scale.layers.0.bias", "decoder.query_scale.layers.0.bias"),
        ("transformer.decoder.query_scale.layers.1.weight", "decoder.query_scale.layers.1.weight"),
        ("transformer.decoder.query_scale.layers.1.bias", "decoder.query_scale.layers.1.bias"),
        ("transformer.decoder.layers.0.ca_qpos_proj.weight", "decoder.layers.0.ca_qpos_proj.weight"),
        ("transformer.decoder.layers.0.ca_qpos_proj.bias", "decoder.layers.0.ca_qpos_proj.bias"),
    ]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
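
# A small illustration of the two helpers above (the keys here are hypothetical,
# chosen only to show the behavior):
#
#   sd = {"backbone.0.body.conv1.weight": w0, "input_proj.weight": w1}
#   sd = rename_backbone_keys(sd)
#   # -> {"backbone.conv_encoder.model.conv1.weight": w0, "input_proj.weight": w1}
#   rename_key(sd, "input_proj.weight", "input_projection.weight")
#   # -> {"backbone.conv_encoder.model.conv1.weight": w0, "input_projection.weight": w1}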
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    # load default config
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)

    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    logger.info(f"Converting model {model_name}...")

    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["conditional_detr.model" + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)

    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_name",
        default="conditional_detr_resnet50",
        type=str,
        help="Name of the CONDITIONAL_DETR model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    args = parser.parse_args()
    convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
"""simple docstring"""
demo_graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}
def bfs_shortest_path(graph: dict, start, goal) -> list[str]:
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]

    # return path if start is goal
    if start == goal:
        return [start]

    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path

            # mark node as explored
            explored.add(node)

    # in case there's no path between the 2 nodes
    return []
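
# Quick check against demo_graph (consistent with the demo at the bottom of this
# file): bfs_shortest_path(demo_graph, "G", "D") returns ['G', 'C', 'A', 'B', 'D'],
# and bfs_shortest_path(demo_graph, "G", "G") returns ['G'].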
def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(start)
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]
if __name__ == "__main__":
    print(bfs_shortest_path(demo_graph, "G", "D"))  # returns ['G', 'C', 'A', 'B', 'D']
    print(bfs_shortest_path_distance(demo_graph, "G", "D"))  # returns 4
"""simple docstring"""
import argparse
import numpy as np
import torch
from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")
def load_weights(checkpoint, hf_model, config):
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]

            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    hf_model.remove_weight_norm()
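
# Minimal usage sketch for load_weights (the path is hypothetical; the original
# checkpoint must contain the "input_conv"/"upsamples"/"blocks"/"output_conv"
# keys read above):
#
#   config = SpeechT5HifiGanConfig()
#   model = SpeechT5HifiGan(config)
#   ckpt = torch.load("path/to/original_hifigan.pt")
#   load_weights(ckpt["model"]["generator"], model, config)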
@torch.no_grad()
def convert_hifigan_checkpoint(
    checkpoint_path,
    stats_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()

    model = SpeechT5HifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
    parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )
    args = parser.parse_args()
    convert_hifigan_checkpoint(
        args.checkpoint_path,
        args.stats_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = "cuda" if torch.cuda.is_available() else "cpu"
def split_text(text: str, n: int = 100, character: str = " ") -> List[str]:
    """Split the text every ``n``-th occurrence of ``character``"""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]
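
# For example, with n shrunk to 3 for illustration:
#   split_text("one two three four five", n=3)  # -> ["one two three", "four five"]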
def split_documents(documents: dict) -> dict:
    """Split documents into passages"""
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}
def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    """Compute the DPR embeddings of document passages"""
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt"
    )["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def lowerCamelCase (a_ :"RagExampleArguments" , a_ :"ProcessingArguments" , a_ :"IndexHnswArguments" , ) -> Any:
######################################
logger.info('''Step 1 - Create the dataset''')
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
lowercase :List[Any] = load_dataset(
'''csv''' , data_files=[rag_example_args.csv_path] , split='''train''' , delimiter='''\t''' , column_names=['''title''', '''text'''])
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
lowercase :Optional[Any] = dataset.map(a_ , batched=a_ , num_proc=processing_args.num_proc)
# And compute the embeddings
lowercase :str = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=a_)
lowercase :Dict = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
lowercase :str = Features(
{'''text''': Value('''string'''), '''title''': Value('''string'''), '''embeddings''': Sequence(Value('''float32'''))}) # optional, save as float32 instead of float64 to save space
lowercase :Optional[Any] = dataset.map(
partial(a_ , ctx_encoder=a_ , ctx_tokenizer=a_) , batched=a_ , batch_size=processing_args.batch_size , features=a_ , )
# And finally save your dataset
lowercase :str = os.path.join(rag_example_args.output_dir , '''my_knowledge_dataset''')
dataset.save_to_disk(a_)
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info('''Step 2 - Index the dataset''')
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
lowercase :str = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT)
dataset.add_faiss_index('''embeddings''' , custom_index=a_)
# And save the index
lowercase :Optional[Any] = os.path.join(rag_example_args.output_dir , '''my_knowledge_dataset_hnsw_index.faiss''')
dataset.get_index('''embeddings''').save(a_)
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"),
        metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"},
    )
    question: Optional[str] = field(
        default=None,
        metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."},
    )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq",
        metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"},
    )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base",
        metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        },
    )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"),
        metadata={"help": "Path to a directory where the dataset passages and the index will be saved"},
    )


@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        },
    )
    batch_size: int = field(
        default=16,
        metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        },
    )


@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768,
        metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."},
    )
    m: int = field(
        default=128,
        metadata={
            "help": "The number of bi-directional links created for every new element during the HNSW index construction."
        },
    )
if __name__ == "__main__":
    logging.basicConfig(level=logging.WARNING)
    logger.setLevel(logging.INFO)

    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
        main(rag_example_args, processing_args, index_hnsw_args)
"""simple docstring"""
def move_tower(height, from_pole, to_pole, with_pole):
    if height >= 1:
        move_tower(height - 1, from_pole, with_pole, to_pole)
        move_disk(from_pole, to_pole)
        move_tower(height - 1, with_pole, to_pole, from_pole)


def move_disk(fp, tp):
    print("moving disk from", fp, "to", tp)


def main():
    height = int(input("Height of hanoi: ").strip())
    move_tower(height, "A", "B", "C")
if __name__ == "__main__":
    main()
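
# For example, entering a height of 2 prints the 2**2 - 1 = 3 moves:
#   moving disk from A to C
#   moving disk from A to B
#   moving disk from C to B
# In general a tower of height n is solved in 2**n - 1 moves.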
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
    "configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_longt5"] = [
        "LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongT5EncoderModel",
        "LongT5ForConditionalGeneration",
        "LongT5Model",
        "LongT5PreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_longt5"] = [
        "FlaxLongT5ForConditionalGeneration",
        "FlaxLongT5Model",
        "FlaxLongT5PreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_CITATION = """\
"""
_DESCRIPTION = """
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
"""
_KWARGS_DESCRIPTION = """
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to 'cuda' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]
>>> results = perplexity.compute(model_id='gpt2',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
78.22
>>> print(round(results[\"perplexities\"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = datasets.load_dataset(\"wikitext\",
... \"wikitext-2-raw-v1\",
... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!='']
>>> results = perplexity.compute(model_id='gpt2',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
60.35
>>> print(round(results[\"perplexities\"][0], 2))
81.12
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Perplexity(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "input_texts": datasets.Value("string"),
                }
            ),
            reference_urls=["https://huggingface.co/docs/transformers/perplexity"],
        )
    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            perplexity_batch = torch.exp2(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
"""simple docstring"""
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
    import tensorflow as tf

if is_torch_available():
    import torch
UpperCAmelCase = logging.get_logger(__name__)
class Conversation:
    def __init__(
        self, text: str = None, conversation_id: uuid.UUID = None, past_user_inputs=None, generated_responses=None
    ):
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []

        self.uuid: uuid.UUID = conversation_id
        self.past_user_inputs: List[str] = past_user_inputs
        self.generated_responses: List[str] = generated_responses
        self.new_user_input: Optional[str] = text

    def __eq__(self, other):
        if not isinstance(other, Conversation):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def add_user_input(self, text: str, overwrite: bool = False):
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
                    f'with: "{text}".'
                )
                self.new_user_input = text
            else:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
                    f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input'
                )
        else:
            self.new_user_input = text

    def mark_processed(self):
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response: str):
        self.generated_responses.append(response)

    def iter_texts(self):
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__(self):
        output = f"Conversation id: {self.uuid} \n"
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
            output += f"{name} >> {text} \n"
        return output
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        min_length_for_response (`int`, *optional*, defaults to 32):
            The minimum length (in number of tokens) for a response.
        minimum_tokens (`int`, *optional*, defaults to 10):
            The minimum length of tokens to leave for a response.
    """,
)
class ConversationalPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

    def _sanitize_parameters(
        self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs
    ):
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}

        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens

        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, conversations: Union[Conversation, List[Conversation]], num_workers=0, **kwargs):
        outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs

    def preprocess(self, conversation: Conversation, min_length_for_response=32) -> Dict[str, Any]:
        if not isinstance(conversation, Conversation):
            raise ValueError("ConversationalPipeline, expects Conversation as inputs")
        if conversation.new_user_input is None:
            raise ValueError(
                f"Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. "
                "Add user inputs with the conversation's `add_user_input` method"
            )
        if hasattr(self.tokenizer, "_build_conversation_input_ids"):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)

        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}

    def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        max_length = generate_kwargs.get("max_length", self.model.config.max_length)

        n = model_inputs["input_ids"].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f"Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})")
            trim = max_length - minimum_tokens
            model_inputs["input_ids"] = model_inputs["input_ids"][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs["attention_mask"] = model_inputs["attention_mask"][:, -trim:]
        conversation = model_inputs.pop("conversation")
        generate_kwargs["max_length"] = max_length
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}

    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        output_ids = model_outputs["output_ids"]
        answer = self.tokenizer.decode(
            output_ids[0],
            skip_special_tokens=True,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
        )
        conversation = model_outputs["conversation"]
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation

    def _legacy_parse_and_tokenize(self, conversation: Conversation) -> List[int]:
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))

        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
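
# Minimal usage sketch (the checkpoint name is illustrative, not part of this
# module; any model registered for the "conversational" task behaves the same):
#
#   from transformers import pipeline
#   chatbot = pipeline("conversational", model="microsoft/DialoGPT-medium")
#   conversation = Conversation("Going to the movies tonight - any suggestions?")
#   conversation = chatbot(conversation)
#   print(conversation.generated_responses[-1])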
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class BertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertTokenizer
    rust_tokenizer_class = BertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_chinese(self):
        tokenizer = BasicTokenizer()

        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_basic_tokenizer_splits_on_punctuation(self):
        tokenizer = BasicTokenizer()
        text = "a\n'll !!to?'d of, can't."
        expected = ["a", "'", "ll", "!", "!", "to", "?", "'", "d", "of", ",", "can", "'", "t", "."]
        self.assertListEqual(tokenizer.tokenize(text), expected)
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
SCREAMING_SNAKE_CASE_ : List[str] = {}
for i, token in enumerate(snake_case__ ):
SCREAMING_SNAKE_CASE_ : Tuple = i
SCREAMING_SNAKE_CASE_ : List[str] = WordpieceTokenizer(vocab=snake_case__ ,unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) ,[] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) ,['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) ,['[UNK]', 'runn', '##ing'] )
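# Note: WordPiece splits greedily, longest-match-first; if any remaining piece of a token
# cannot be matched against the vocab (the trailing "X" in "unwantedX" above), the whole
# token collapses to the unk_token rather than being partially split.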
def snake_case ( self ):
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def snake_case ( self ):
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def snake_case ( self ):
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : Dict = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(t ) for t in ['Test', '\xad', 'test']] ,[['[UNK]'], [], ['[UNK]']] )
self.assertListEqual(
[rust_tokenizer.tokenize(t ) for t in ['Test', '\xad', 'test']] ,[['[UNK]'], [], ['[UNK]']] )
@slow
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.tokenizer_class.from_pretrained('bert-base-uncased' )
SCREAMING_SNAKE_CASE_ : str = tokenizer.encode('sequence builders' ,add_special_tokens=snake_case__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenizer.encode('multi-sequence build' ,add_special_tokens=snake_case__ )
SCREAMING_SNAKE_CASE_ : int = tokenizer.build_inputs_with_special_tokens(snake_case__ )
SCREAMING_SNAKE_CASE_ : str = tokenizer.build_inputs_with_special_tokens(snake_case__ ,snake_case__ )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def snake_case ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
SCREAMING_SNAKE_CASE_ : int = self.rust_tokenizer_class.from_pretrained(snake_case__ ,**snake_case__ )
SCREAMING_SNAKE_CASE_ : List[Any] = F'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'
SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer_r.encode_plus(
snake_case__ ,return_attention_mask=snake_case__ ,return_token_type_ids=snake_case__ ,return_offsets_mapping=snake_case__ ,add_special_tokens=snake_case__ ,)
SCREAMING_SNAKE_CASE_ : Any = tokenizer_r.do_lower_case if hasattr(tokenizer_r ,'do_lower_case' ) else False
SCREAMING_SNAKE_CASE_ : Any = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'Allen'),
((21, 23), '##NL'),
((23, 24), '##P'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'allen'),
((21, 23), '##nl'),
((23, 24), '##p'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] ,tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] ,tokens['offset_mapping'] )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Dict = ['的', '人', '有']
SCREAMING_SNAKE_CASE_ : List[Any] = ''.join(snake_case__ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = True
SCREAMING_SNAKE_CASE_ : Dict = self.tokenizer_class.from_pretrained(snake_case__ ,**snake_case__ )
SCREAMING_SNAKE_CASE_ : str = self.rust_tokenizer_class.from_pretrained(snake_case__ ,**snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer_p.encode(snake_case__ ,add_special_tokens=snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer_r.encode(snake_case__ ,add_special_tokens=snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer_r.convert_ids_to_tokens(snake_case__ )
SCREAMING_SNAKE_CASE_ : Tuple = tokenizer_p.convert_ids_to_tokens(snake_case__ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(snake_case__ ,snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = False
SCREAMING_SNAKE_CASE_ : Dict = self.rust_tokenizer_class.from_pretrained(snake_case__ ,**snake_case__ )
SCREAMING_SNAKE_CASE_ : List[Any] = self.tokenizer_class.from_pretrained(snake_case__ ,**snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer_r.encode(snake_case__ ,add_special_tokens=snake_case__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenizer_p.encode(snake_case__ ,add_special_tokens=snake_case__ )
SCREAMING_SNAKE_CASE_ : int = tokenizer_r.convert_ids_to_tokens(snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer_p.convert_ids_to_tokens(snake_case__ )
# it is expected that only the first Chinese character is not preceded by "##".
SCREAMING_SNAKE_CASE_ : List[Any] = [
F'##{token}' if idx != 0 else token for idx, token in enumerate(snake_case__ )
]
self.assertListEqual(snake_case__ ,snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
| 105 |
"""simple docstring"""
def lowerCamelCase (a_ :int = 100) -> int:
lowercase :Union[str, Any] = set()
lowercase :List[Any] = 0
lowercase :Dict = n + 1 # maximum limit
for a in range(2 , a_):
for b in range(2 , a_):
lowercase :Tuple = a**b # calculates the current power
collect_powers.add(a_) # adds the result to the set
return len(a_)
if __name__ == "__main__":
print('''Number of terms ''', solution(int(str(input()).strip())))
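# Sanity check (hand-verifiable, stated as an expected-behaviour assumption): for n = 5
# the distinct values of a**b with 2 <= a, b <= 5 are
# {4, 8, 9, 16, 25, 27, 32, 64, 81, 125, 243, 256, 625, 1024, 3125}, so solution(5) == 15.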
| 677 | 0 |
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
def __UpperCamelCase ( self : Union[str, Any] ) -> Tuple:
A = 'ylacombe/bark-small'
A = tempfile.mkdtemp()
A = 'en_speaker_1'
A = 'This is a test string'
A = 'speaker_embeddings_path.json'
A = 'speaker_embeddings'
def __UpperCamelCase ( self : List[str] , **__UpperCamelCase : Union[str, Any] ) -> Optional[int]:
return AutoTokenizer.from_pretrained(self.checkpoint , **__UpperCamelCase )
def __UpperCamelCase ( self : str ) -> Union[str, Any]:
shutil.rmtree(self.tmpdirname )
def __UpperCamelCase ( self : int ) -> List[str]:
A = self.get_tokenizer()
A = BarkProcessor(tokenizer=__UpperCamelCase )
processor.save_pretrained(self.tmpdirname )
A = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def __UpperCamelCase ( self : Tuple ) -> int:
A = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
A = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
A = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='(BOS)' , eos_token='(EOS)' , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def __UpperCamelCase ( self : int ) -> List[str]:
A = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
A = 35
A = 2
A = 8
A = {
'semantic_prompt': np.ones(__UpperCamelCase ),
'coarse_prompt': np.ones((nb_codebooks_coarse, seq_len) ),
'fine_prompt': np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
A = processor(text=self.input_string , voice_preset=__UpperCamelCase )
A = inputs['history_prompt']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(key , np.array([] ) ).tolist() )
# test loading voice preset from npz file
A = os.path.join(self.tmpdirname , 'file.npz' )
np.savez(__UpperCamelCase , **__UpperCamelCase )
A = processor(text=self.input_string , voice_preset=__UpperCamelCase )
A = inputs['history_prompt']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(key , np.array([] ) ).tolist() )
# test loading voice preset from the hub
A = processor(text=self.input_string , voice_preset=self.voice_preset )
def __UpperCamelCase ( self : Optional[int] ) -> Union[str, Any]:
A = self.get_tokenizer()
A = BarkProcessor(tokenizer=__UpperCamelCase )
A = processor(text=self.input_string )
A = tokenizer(
self.input_string , padding='max_length' , max_length=256 , add_special_tokens=__UpperCamelCase , return_attention_mask=__UpperCamelCase , return_token_type_ids=__UpperCamelCase , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() ) | 106 |
"""simple docstring"""
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {
'''microsoft/xprophetnet-large-wiki100-cased''': (
'''https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json'''
),
}
class __magic_name__ ( __UpperCAmelCase ):
__A : Optional[Any] = "xlm-prophetnet"
__A : List[str] = ["past_key_values"]
__A : int = {
"num_attention_heads": "num_encoder_attention_heads",
}
def __init__( self : Any , snake_case__ : Optional[float] = 0.1 , snake_case__ : Optional[Union[str, Callable]] = "gelu" , snake_case__ : Optional[int] = 3_0_5_2_2 , snake_case__ : Optional[int] = 1_0_2_4 , snake_case__ : Optional[int] = 4_0_9_6 , snake_case__ : Optional[int] = 1_2 , snake_case__ : Optional[int] = 1_6 , snake_case__ : Optional[int] = 4_0_9_6 , snake_case__ : Optional[int] = 1_2 , snake_case__ : Optional[int] = 1_6 , snake_case__ : Optional[float] = 0.1 , snake_case__ : Optional[float] = 0.1 , snake_case__ : Optional[int] = 5_1_2 , snake_case__ : Optional[float] = 0.02 , snake_case__ : Optional[bool] = True , snake_case__ : Optional[bool] = True , snake_case__ : Optional[int] = 0 , snake_case__ : Optional[int] = 2 , snake_case__ : Optional[int] = 3_2 , snake_case__ : Optional[int] = 1_2_8 , snake_case__ : Optional[bool] = False , snake_case__ : Optional[float] = 0.0 , snake_case__ : Optional[bool] = True , snake_case__ : Optional[int] = 0 , snake_case__ : Optional[int] = 1 , snake_case__ : Optional[int] = 2 , **snake_case__ : List[str] , ):
'''simple docstring'''
lowercase :Tuple = vocab_size
lowercase :Optional[int] = hidden_size
lowercase :Optional[int] = encoder_ffn_dim
lowercase :Optional[int] = num_encoder_layers
lowercase :Dict = num_encoder_attention_heads
lowercase :List[str] = decoder_ffn_dim
lowercase :Dict = num_decoder_layers
lowercase :List[Any] = num_decoder_attention_heads
lowercase :Optional[int] = max_position_embeddings
lowercase :Tuple = init_std # Normal(0, this parameter)
lowercase :int = activation_function
# parameters for xlmprophetnet
lowercase :Dict = ngram
lowercase :Optional[Any] = num_buckets
lowercase :Dict = relative_max_distance
lowercase :List[Any] = disable_ngram_loss
lowercase :Optional[Any] = eps
# 3 Types of Dropout
lowercase :Any = attention_dropout
lowercase :List[str] = activation_dropout
lowercase :List[str] = dropout
lowercase :List[str] = use_cache
super().__init__(
pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , is_encoder_decoder=snake_case__ , add_cross_attention=snake_case__ , decoder_start_token_id=snake_case__ , **snake_case__ , )
@property
def __snake_case ( self : Any ):
'''simple docstring'''
return self.num_encoder_layers + self.num_decoder_layers
@num_hidden_layers.setter
def __snake_case ( self : Optional[Any] , snake_case__ : Optional[Any] ):
'''simple docstring'''
raise NotImplementedError(
'''This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and'''
''' `num_decoder_layers`.''' )
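# Note: `num_hidden_layers` is derived rather than stored. For example (illustrative
# values), with num_encoder_layers=12 and num_decoder_layers=12 the property returns 24,
# while assigning to it raises the NotImplementedError defined above.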
| 677 | 0 |
'''simple docstring'''


def catalan_numbers(upper_limit: int) -> list:
    if upper_limit < 0:
        raise ValueError('Limit for the Catalan sequence must be ≥ 0')
    catalan_list = [0] * (upper_limit + 1)
    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1
    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
    return catalan_list


if __name__ == "__main__":
    print('''\n********* Catalan Numbers Using Dynamic Programming ************\n''')
    print('''\n*** Enter -1 at any time to quit ***''')
    print('''\nEnter the upper limit (≥ 0) for the Catalan number sequence: ''', end='''''')
    try:
        while True:
            N = int(input().strip())
            if N < 0:
                print('''\n********* Goodbye!! ************''')
                break
            else:
                print(F'''The Catalan numbers from 0 through {N} are:''')
                print(catalan_numbers(N))
                print('''Try another upper limit for the sequence: ''', end='''''')
    except (NameError, ValueError):
        print('''\n********* Invalid input, goodbye! ************\n''')

    import doctest

    doctest.testmod()
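# Example output, verifiable from the recurrence above:
# catalan_numbers(5) -> [1, 1, 2, 5, 14, 42]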
| 107 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase = {
'''configuration_bert''': ['''BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BertConfig''', '''BertOnnxConfig'''],
'''tokenization_bert''': ['''BasicTokenizer''', '''BertTokenizer''', '''WordpieceTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = ['''BertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
'''BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BertForMaskedLM''',
'''BertForMultipleChoice''',
'''BertForNextSentencePrediction''',
'''BertForPreTraining''',
'''BertForQuestionAnswering''',
'''BertForSequenceClassification''',
'''BertForTokenClassification''',
'''BertLayer''',
'''BertLMHeadModel''',
'''BertModel''',
'''BertPreTrainedModel''',
'''load_tf_weights_in_bert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
'''TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFBertEmbeddings''',
'''TFBertForMaskedLM''',
'''TFBertForMultipleChoice''',
'''TFBertForNextSentencePrediction''',
'''TFBertForPreTraining''',
'''TFBertForQuestionAnswering''',
'''TFBertForSequenceClassification''',
'''TFBertForTokenClassification''',
'''TFBertLMHeadModel''',
'''TFBertMainLayer''',
'''TFBertModel''',
'''TFBertPreTrainedModel''',
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = ['''TFBertTokenizer''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
'''FlaxBertForCausalLM''',
'''FlaxBertForMaskedLM''',
'''FlaxBertForMultipleChoice''',
'''FlaxBertForNextSentencePrediction''',
'''FlaxBertForPreTraining''',
'''FlaxBertForQuestionAnswering''',
'''FlaxBertForSequenceClassification''',
'''FlaxBertForTokenClassification''',
'''FlaxBertModel''',
'''FlaxBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
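# Usage sketch (assuming the standard _LazyModule semantics): a statement such as
# `from transformers import BertModel` resolves the class only on first attribute access,
# so the torch-backed module is not imported until it is actually needed, and only names
# registered in _import_structure are importable.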
| 677 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__a: Union[str, Any] = {
'''configuration_swinv2''': ['''SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Swinv2Config'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a: Union[str, Any] = [
'''SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Swinv2ForImageClassification''',
'''Swinv2ForMaskedImageModeling''',
'''Swinv2Model''',
'''Swinv2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swinva import (
SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinvaForImageClassification,
SwinvaForMaskedImageModeling,
SwinvaModel,
SwinvaPreTrainedModel,
)
else:
import sys
__a: List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 108 |
"""simple docstring"""
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __magic_name__ ( __UpperCAmelCase ):
__A : Tuple = ["image_processor", "tokenizer"]
__A : Dict = "BlipImageProcessor"
__A : Dict = "AutoTokenizer"
def __init__( self : Any , snake_case__ : Union[str, Any] , snake_case__ : str ):
'''simple docstring'''
lowercase :Dict = False
super().__init__(snake_case__ , snake_case__ )
lowercase :Union[str, Any] = self.image_processor
def __call__( self : Optional[int] , snake_case__ : ImageInput = None , snake_case__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , snake_case__ : bool = True , snake_case__ : Union[bool, str, PaddingStrategy] = False , snake_case__ : Union[bool, str, TruncationStrategy] = None , snake_case__ : Optional[int] = None , snake_case__ : int = 0 , snake_case__ : Optional[int] = None , snake_case__ : Optional[bool] = None , snake_case__ : bool = False , snake_case__ : bool = False , snake_case__ : bool = False , snake_case__ : bool = False , snake_case__ : bool = False , snake_case__ : bool = True , snake_case__ : Optional[Union[str, TensorType]] = None , **snake_case__ : Optional[Any] , ):
'''simple docstring'''
if images is None and text is None:
raise ValueError('''You have to specify either images or text.''' )
# Get only text
if images is None:
lowercase :List[Any] = self.tokenizer
lowercase :str = self.tokenizer(
text=snake_case__ , add_special_tokens=snake_case__ , padding=snake_case__ , truncation=snake_case__ , max_length=snake_case__ , stride=snake_case__ , pad_to_multiple_of=snake_case__ , return_attention_mask=snake_case__ , return_overflowing_tokens=snake_case__ , return_special_tokens_mask=snake_case__ , return_offsets_mapping=snake_case__ , return_token_type_ids=snake_case__ , return_length=snake_case__ , verbose=snake_case__ , return_tensors=snake_case__ , **snake_case__ , )
return text_encoding
# add pixel_values
lowercase :Union[str, Any] = self.image_processor(snake_case__ , return_tensors=snake_case__ )
if text is not None:
lowercase :int = self.tokenizer(
text=snake_case__ , add_special_tokens=snake_case__ , padding=snake_case__ , truncation=snake_case__ , max_length=snake_case__ , stride=snake_case__ , pad_to_multiple_of=snake_case__ , return_attention_mask=snake_case__ , return_overflowing_tokens=snake_case__ , return_special_tokens_mask=snake_case__ , return_offsets_mapping=snake_case__ , return_token_type_ids=snake_case__ , return_length=snake_case__ , verbose=snake_case__ , return_tensors=snake_case__ , **snake_case__ , )
else:
lowercase :Optional[int] = None
if text_encoding is not None:
encoding_image_processor.update(snake_case__ )
return encoding_image_processor
def __snake_case ( self : Tuple , *snake_case__ : List[Any] , **snake_case__ : Tuple ):
'''simple docstring'''
return self.tokenizer.batch_decode(*snake_case__ , **snake_case__ )
def __snake_case ( self : List[str] , *snake_case__ : Dict , **snake_case__ : List[Any] ):
'''simple docstring'''
return self.tokenizer.decode(*snake_case__ , **snake_case__ )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def __snake_case ( self : List[Any] ):
'''simple docstring'''
lowercase :List[Any] = self.tokenizer.model_input_names
lowercase :List[Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
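# Hypothetical usage sketch (the class and argument names above are placeholder spellings):
# processor = ProcessorClass(image_processor, tokenizer)
# inputs = processor(images=image, text="a photo of a cat", return_tensors="pt")
# `inputs` then carries pixel_values from the image processor with the tokenizer's
# input_ids/attention_mask merged in, per the __call__ implementation above.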
| 677 | 0 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
a = logging.getLogger(__name__)
torch.set_grad_enabled(False)
a = "cuda" if torch.cuda.is_available() else "cpu"
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase=100 , __UpperCAmelCase=" " ) -> List[str]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = text.split(__UpperCAmelCase )
return [character.join(text[i : i + n] ).strip() for i in range(0 , len(__UpperCAmelCase ) , __UpperCAmelCase )]
def __magic_name__ ( __UpperCAmelCase ) -> dict:
'''simple docstring'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = [], []
for title, text in zip(documents["""title"""] , documents["""text"""] ):
if text is not None:
for passage in split_text(__UpperCAmelCase ):
titles.append(title if title is not None else """""" )
texts.append(__UpperCAmelCase )
return {"title": titles, "text": texts}
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> dict:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = ctx_tokenizer(
documents["""title"""] , documents["""text"""] , truncation=__UpperCAmelCase , padding="""longest""" , return_tensors="""pt""" )["""input_ids"""]
__SCREAMING_SNAKE_CASE = ctx_encoder(input_ids.to(device=__UpperCAmelCase ) , return_dict=__UpperCAmelCase ).pooler_output
return {"embeddings": embeddings.detach().cpu().numpy()}
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Any:
'''simple docstring'''
logger.info("""Step 1 - Create the dataset""" )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
__SCREAMING_SNAKE_CASE = load_dataset(
"""csv""" , data_files=[rag_example_args.csv_path] , split="""train""" , delimiter="""\t""" , column_names=["""title""", """text"""] )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
__SCREAMING_SNAKE_CASE = dataset.map(__UpperCAmelCase , batched=__UpperCAmelCase , num_proc=processing_args.num_proc )
# And compute the embeddings
__SCREAMING_SNAKE_CASE = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
__SCREAMING_SNAKE_CASE = Features(
{"""text""": Value("""string""" ), """title""": Value("""string""" ), """embeddings""": Sequence(Value("""float32""" ) )} ) # optional, save as float32 instead of float64 to save space
__SCREAMING_SNAKE_CASE = dataset.map(
partial(__UpperCAmelCase , ctx_encoder=__UpperCAmelCase , ctx_tokenizer=__UpperCAmelCase ) , batched=__UpperCAmelCase , batch_size=processing_args.batch_size , features=__UpperCAmelCase , )
# And finally save your dataset
__SCREAMING_SNAKE_CASE = os.path.join(rag_example_args.output_dir , """my_knowledge_dataset""" )
dataset.save_to_disk(__UpperCAmelCase )
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info("""Step 2 - Index the dataset""" )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
__SCREAMING_SNAKE_CASE = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
dataset.add_faiss_index("""embeddings""" , custom_index=__UpperCAmelCase )
# And save the index
__SCREAMING_SNAKE_CASE = os.path.join(rag_example_args.output_dir , """my_knowledge_dataset_hnsw_index.faiss""" )
dataset.get_index("""embeddings""" ).save(__UpperCAmelCase )
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class __a :
__UpperCamelCase : str = field(
default=str(Path(_snake_case ).parent / 'test_run' / 'dummy-kb' / 'my_knowledge_dataset.csv' ), metadata={'help': 'Path to a tab-separated csv file with columns \'title\' and \'text\''}, )
__UpperCamelCase : Optional[str] = field(
default=_snake_case, metadata={'help': 'Question that is passed as input to RAG. Default is \'What does Moses\' rod turn into ?\'.'}, )
__UpperCamelCase : str = field(
default='facebook/rag-sequence-nq', metadata={'help': 'The RAG model to use. Either \'facebook/rag-sequence-nq\' or \'facebook/rag-token-nq\''}, )
__UpperCamelCase : str = field(
default='facebook/dpr-ctx_encoder-multiset-base', metadata={
'help': (
'The DPR context encoder model to use. Either \'facebook/dpr-ctx_encoder-single-nq-base\' or'
' \'facebook/dpr-ctx_encoder-multiset-base\''
)
}, )
__UpperCamelCase : Optional[str] = field(
default=str(Path(_snake_case ).parent / 'test_run' / 'dummy-kb' ), metadata={'help': 'Path to a directory where the dataset passages and the index will be saved'}, )
@dataclass
class __a :
__UpperCamelCase : Optional[int] = field(
default=_snake_case, metadata={
'help': 'The number of processes to use to split the documents into passages. Default is single process.'
}, )
__UpperCamelCase : int = field(
default=16, metadata={
'help': 'The batch size to use when computing the passages embeddings using the DPR context encoder.'
}, )
@dataclass
class __a :
__UpperCamelCase : int = field(
default=768, metadata={'help': 'The dimension of the embeddings to pass to the HNSW Faiss index.'}, )
__UpperCamelCase : int = field(
default=128, metadata={
'help': (
'The number of bi-directional links created for every new element during the HNSW index construction.'
)
}, )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
a = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
a , a , a = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
a = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
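# Hypothetical invocation (the script file name and paths are assumptions):
#   python use_own_knowledge_dataset.py \
#       --csv_path path/to/my_knowledge_dataset.csv \
#       --output_dir path/to/my_knowledge_dataset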
| 109 |
"""simple docstring"""
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class __magic_name__ ( __UpperCAmelCase ):
@require_torch
def __snake_case ( self : Dict ):
'''simple docstring'''
lowercase :Optional[Any] = '''
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
'''
lowercase :Any = '''
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
'''
lowercase :Tuple = '''
import socket
def offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")
socket.socket = offline_socket
'''
# Force fetching the files so that we can use the cache
lowercase :str = '''hf-internal-testing/tiny-random-bert'''
BertConfig.from_pretrained(snake_case__ )
BertModel.from_pretrained(snake_case__ )
BertTokenizer.from_pretrained(snake_case__ )
pipeline(task='''fill-mask''' , model=snake_case__ )
# baseline - just load from_pretrained with normal network
lowercase :Union[str, Any] = [sys.executable, '''-c''', '''\n'''.join([load, run, mock] )]
# should succeed
lowercase :Any = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
lowercase :List[Any] = '''1'''
lowercase :List[str] = subprocess.run(snake_case__ , env=snake_case__ , check=snake_case__ , capture_output=snake_case__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
def __snake_case ( self : Tuple ):
'''simple docstring'''
lowercase :List[str] = '''
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
'''
lowercase :Dict = '''
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
'''
lowercase :List[Any] = '''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")
socket.socket = offline_socket
'''
# Force fetching the files so that we can use the cache
lowercase :str = '''hf-internal-testing/tiny-random-bert'''
BertConfig.from_pretrained(snake_case__ )
BertModel.from_pretrained(snake_case__ )
BertTokenizer.from_pretrained(snake_case__ )
pipeline(task='''fill-mask''' , model=snake_case__ )
# baseline - just load from_pretrained with normal network
lowercase :List[str] = [sys.executable, '''-c''', '''\n'''.join([load, run, mock] )]
# should succeed
lowercase :str = self.get_env()
lowercase :str = subprocess.run(snake_case__ , env=snake_case__ , check=snake_case__ , capture_output=snake_case__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
def __snake_case ( self : int ):
'''simple docstring'''
lowercase :str = '''
from transformers import BertConfig, BertModel, BertTokenizer
'''
lowercase :Union[str, Any] = '''
mname = "hf-internal-testing/tiny-random-bert-sharded"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print("success")
'''
lowercase :Optional[int] = '''
import socket
def offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")
socket.socket = offline_socket
'''
# baseline - just load from_pretrained with normal network
lowercase :Optional[Any] = [sys.executable, '''-c''', '''\n'''.join([load, run] )]
# should succeed
lowercase :Union[str, Any] = self.get_env()
lowercase :str = subprocess.run(snake_case__ , env=snake_case__ , check=snake_case__ , capture_output=snake_case__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
# next emulate no network
lowercase :Tuple = [sys.executable, '''-c''', '''\n'''.join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
lowercase :Any = '''1'''
lowercase :Optional[Any] = subprocess.run(snake_case__ , env=snake_case__ , check=snake_case__ , capture_output=snake_case__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
def __snake_case ( self : Optional[int] ):
'''simple docstring'''
lowercase :Dict = '''
from transformers import pipeline
'''
lowercase :Optional[Any] = '''
mname = "hf-internal-testing/tiny-random-bert"
pipe = pipeline(model=mname)
'''
lowercase :Dict = '''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")
socket.socket = offline_socket
'''
lowercase :Tuple = self.get_env()
lowercase :Optional[Any] = '''1'''
lowercase :Optional[int] = [sys.executable, '''-c''', '''\n'''.join([load, mock, run] )]
lowercase :str = subprocess.run(snake_case__ , env=snake_case__ , check=snake_case__ , capture_output=snake_case__ )
self.assertEqual(result.returncode , 1 , result.stderr )
self.assertIn(
'''You cannot infer task automatically within `pipeline` when using offline mode''' , result.stderr.decode().replace('''\n''' , '''''' ) , )
@require_torch
def __snake_case ( self : Optional[Any] ):
'''simple docstring'''
lowercase :List[Any] = '''
from transformers import AutoModel
'''
lowercase :Union[str, Any] = '''
mname = "hf-internal-testing/test_dynamic_model"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print("success")
'''
# baseline - just load from_pretrained with normal network
lowercase :Union[str, Any] = [sys.executable, '''-c''', '''\n'''.join([load, run] )]
# should succeed
lowercase :List[str] = self.get_env()
lowercase :Optional[int] = subprocess.run(snake_case__ , env=snake_case__ , check=snake_case__ , capture_output=snake_case__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
lowercase :List[Any] = '''1'''
lowercase :Tuple = subprocess.run(snake_case__ , env=snake_case__ , check=snake_case__ , capture_output=snake_case__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
| 677 | 0 |
import qiskit


def single_qubit_measure(qubits: int, classical_bits: int):
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0)
    circuit.x(1)
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1], [0, 1])
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)


if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
    print(f'Total count for various states are: {counts}')
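# Expected result (the circuit is deterministic): both qubits are flipped to |1> by the
# X gates before measurement, so all 1000 shots should report '11', i.e. {'11': 1000}.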
| 204 |
"""simple docstring"""
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase = logging.get_logger()
@dataclass
class __magic_name__ :
__A : nn.Module
__A : List[nn.Module] = field(default_factory=__UpperCAmelCase )
__A : list = field(default_factory=__UpperCAmelCase )
def __snake_case ( self : List[str] , snake_case__ : List[str] , snake_case__ : Tensor , snake_case__ : Tensor ):
'''simple docstring'''
lowercase :List[str] = len(list(m.modules() ) ) == 1 or isinstance(snake_case__ , nn.Convad ) or isinstance(snake_case__ , nn.BatchNormad )
if has_not_submodules:
self.traced.append(snake_case__ )
def __call__( self : int , snake_case__ : Tensor ):
'''simple docstring'''
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(snake_case__ )
[x.remove() for x in self.handles]
return self
@property
def __snake_case ( self : int ):
'''simple docstring'''
return list(filter(lambda snake_case__ : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class __magic_name__ :
__A : nn.Module
__A : nn.Module
__A : int = 0
__A : List = field(default_factory=__UpperCAmelCase )
__A : List = field(default_factory=__UpperCAmelCase )
def __call__( self : Dict , snake_case__ : Tensor ):
'''simple docstring'''
lowercase :Dict = Tracker(self.dest )(snake_case__ ).parametrized
lowercase :Optional[Any] = Tracker(self.src )(snake_case__ ).parametrized
lowercase :List[str] = list(filter(lambda snake_case__ : type(snake_case__ ) not in self.src_skip , snake_case__ ) )
lowercase :Tuple = list(filter(lambda snake_case__ : type(snake_case__ ) not in self.dest_skip , snake_case__ ) )
if len(snake_case__ ) != len(snake_case__ ):
raise Exception(
f"""Numbers of operations are different. Source module has {len(snake_case__ )} operations while"""
f""" destination module has {len(snake_case__ )}.""" )
for dest_m, src_m in zip(snake_case__ , snake_case__ ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(f"""Transfered from={src_m} to={dest_m}""" )
def lowerCamelCase (a_ :str , a_ :ResNetConfig , a_ :Path , a_ :bool = True) -> Optional[Any]:
print(F"""Converting {name}...""")
with torch.no_grad():
lowercase :Union[str, Any] = timm.create_model(a_ , pretrained=a_).eval()
lowercase :Tuple = ResNetForImageClassification(a_).eval()
lowercase :int = ModuleTransfer(src=a_ , dest=a_)
lowercase :List[Any] = torch.randn((1, 3, 224, 224))
module_transfer(a_)
assert torch.allclose(from_model(a_) , our_model(a_).logits), "The model logits don't match the original one."
lowercase :List[Any] = F"""resnet{'-'.join(name.split('resnet'))}"""
print(a_)
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message='''Add model''' , use_temp_dir=a_ , )
# we can use the convnext one
lowercase :Any = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''')
image_processor.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message='''Add image processor''' , use_temp_dir=a_ , )
print(F"""Pushed {checkpoint_name}""")
def lowerCamelCase (a_ :Path , a_ :str = None , a_ :bool = True) -> int:
lowercase :Optional[Any] = '''imagenet-1k-id2label.json'''
lowercase :Union[str, Any] = 1000
lowercase :Any = (1, num_labels)
lowercase :Tuple = '''huggingface/label-files'''
lowercase :List[str] = num_labels
lowercase :Union[str, Any] = json.load(open(hf_hub_download(a_ , a_ , repo_type='''dataset''') , '''r'''))
lowercase :Any = {int(a_): v for k, v in idalabel.items()}
lowercase :str = idalabel
lowercase :Any = {v: k for k, v in idalabel.items()}
lowercase :Union[str, Any] = partial(a_ , num_labels=a_ , idalabel=a_ , labelaid=a_)
lowercase :Optional[int] = {
'''resnet18''': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type='''basic'''),
'''resnet26''': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck'''),
'''resnet34''': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type='''basic'''),
'''resnet50''': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck'''),
'''resnet101''': ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck'''),
'''resnet152''': ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck'''),
}
if model_name:
convert_weight_and_push(a_ , names_to_config[model_name] , a_ , a_)
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(a_ , a_ , a_ , a_)
return config, expected_shape
if __name__ == "__main__":
UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help=(
'''The name of the model you wish to convert, it must be one of the supported resnet* architecture,'''
''' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=Path,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
default=True,
type=bool,
required=False,
help='''If True, push model and image processor to the hub.''',
)
UpperCAmelCase = parser.parse_args()
UpperCAmelCase = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
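# Hypothetical invocation (script name and paths are assumptions):
#   python convert_resnet.py --model_name resnet50 --pytorch_dump_folder_path ./resnet-out
# Caveat: argparse's `type=bool` turns any non-empty string into True, so
# `--push_to_hub False` would still push; pass an empty string to get False.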
| 677 | 0 |
def euclidean_distance_sqr(point1, point2):
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2


def column_based_sort(array, column=0):
    return sorted(array, key=lambda x: x[column])


def dis_between_closest_pair(points, points_counts, min_dis=float("inf")):
    # brute force over all pairs
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def dis_between_closest_in_strip(points, points_counts, min_dis=float("inf")):
    # only up to 6 neighbours (sorted on y) need to be checked per point
    for i in range(min(6, points_counts - 1), points_counts):
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts):
    # base case
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)

    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[:mid], mid)
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[mid:], points_counts - mid)
    closest_pair_dis = min(closest_in_left, closest_in_right)

    # cross_strip holds the points whose x-coords lie within closest_pair_dis of the midline
    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)

    closest_in_strip = dis_between_closest_in_strip(
        cross_strip, len(cross_strip), closest_pair_dis)
    return min(closest_pair_dis, closest_in_strip)


def closest_pair_of_points(points, points_counts):
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (
        closest_pair_of_points_sqr(
            points_sorted_on_x, points_sorted_on_y, points_counts)
    ) ** 0.5
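# Cross-check sketch: a brute-force reference that should agree with the
# divide-and-conquer result above (sqrt(2) ~= 1.4142 for the sample points below,
# realised by the pair (2, 3) and (3, 4)).
def brute_force_closest_distance(points):
    return min(
        euclidean_distance_sqr(p, q) for i, p in enumerate(points) for q in points[i + 1 :]
    ) ** 0.5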
if __name__ == "__main__":
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
    print("Distance:", closest_pair_of_points(points, len(points)))
| 45 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
__A : Any = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
__A : List[Any] = (
{
"feature-extraction": TFMobileBertModel,
"fill-mask": TFMobileBertForMaskedLM,
"question-answering": TFMobileBertForQuestionAnswering,
"text-classification": TFMobileBertForSequenceClassification,
"token-classification": TFMobileBertForTokenClassification,
"zero-shot": TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
__A : List[str] = False
__A : int = False
def __snake_case ( self : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : Any , snake_case__ : int=False ):
'''simple docstring'''
lowercase :Union[str, Any] = super()._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
if return_labels:
if model_class in get_values(snake_case__ ):
lowercase :Tuple = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
return inputs_dict
class __magic_name__ ( __UpperCAmelCase ):
def __init__( self : Any , snake_case__ : Dict , snake_case__ : Dict=1_3 , snake_case__ : Tuple=7 , snake_case__ : Optional[Any]=True , snake_case__ : Union[str, Any]=True , snake_case__ : str=True , snake_case__ : Optional[Any]=True , snake_case__ : Any=9_9 , snake_case__ : Optional[Any]=3_2 , snake_case__ : Optional[Any]=3_2 , snake_case__ : Any=2 , snake_case__ : Optional[int]=4 , snake_case__ : List[Any]=3_7 , snake_case__ : Optional[int]="gelu" , snake_case__ : List[Any]=0.1 , snake_case__ : str=0.1 , snake_case__ : List[Any]=5_1_2 , snake_case__ : List[str]=1_6 , snake_case__ : Union[str, Any]=2 , snake_case__ : Optional[Any]=0.02 , snake_case__ : Optional[Any]=3 , snake_case__ : Dict=4 , snake_case__ : int=None , ):
'''simple docstring'''
lowercase :Tuple = parent
lowercase :Tuple = batch_size
lowercase :Optional[Any] = seq_length
lowercase :Optional[Any] = is_training
lowercase :Optional[Any] = use_input_mask
lowercase :List[Any] = use_token_type_ids
lowercase :str = use_labels
lowercase :List[str] = vocab_size
lowercase :str = hidden_size
lowercase :Optional[int] = num_hidden_layers
lowercase :Dict = num_attention_heads
lowercase :Any = intermediate_size
lowercase :List[str] = hidden_act
lowercase :Optional[Any] = hidden_dropout_prob
lowercase :List[Any] = attention_probs_dropout_prob
lowercase :List[Any] = max_position_embeddings
lowercase :List[Any] = type_vocab_size
lowercase :Union[str, Any] = type_sequence_label_size
lowercase :Union[str, Any] = initializer_range
lowercase :Any = num_labels
lowercase :int = num_choices
lowercase :Dict = scope
lowercase :Dict = embedding_size
def __snake_case ( self : Tuple ):
'''simple docstring'''
lowercase :Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase :int = None
if self.use_input_mask:
lowercase :int = random_attention_mask([self.batch_size, self.seq_length] )
lowercase :Tuple = None
if self.use_token_type_ids:
lowercase :int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase :Union[str, Any] = None
lowercase :int = None
lowercase :str = None
if self.use_labels:
lowercase :int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase :str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase :Dict = ids_tensor([self.batch_size] , self.num_choices )
lowercase :Optional[int] = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __snake_case ( self : Union[str, Any] , snake_case__ : Tuple , snake_case__ : str , snake_case__ : Tuple , snake_case__ : str , snake_case__ : Any , snake_case__ : Tuple , snake_case__ : Tuple ):
'''simple docstring'''
lowercase :Dict = TFMobileBertModel(config=snake_case__ )
lowercase :Union[str, Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowercase :List[Any] = model(snake_case__ )
lowercase :Optional[int] = [input_ids, input_mask]
lowercase :Optional[int] = model(snake_case__ )
lowercase :Union[str, Any] = model(snake_case__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __snake_case ( self : List[Any] , snake_case__ : Dict , snake_case__ : Optional[int] , snake_case__ : int , snake_case__ : str , snake_case__ : Dict , snake_case__ : Tuple , snake_case__ : Optional[int] ):
'''simple docstring'''
lowercase :Any = TFMobileBertForMaskedLM(config=snake_case__ )
lowercase :Any = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowercase :int = model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __snake_case ( self : Tuple , snake_case__ : Any , snake_case__ : Optional[Any] , snake_case__ : List[Any] , snake_case__ : Dict , snake_case__ : str , snake_case__ : List[Any] , snake_case__ : Tuple ):
'''simple docstring'''
lowercase :Optional[Any] = TFMobileBertForNextSentencePrediction(config=snake_case__ )
lowercase :Tuple = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowercase :Optional[Any] = model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def __snake_case ( self : Dict , snake_case__ : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : int , snake_case__ : Union[str, Any] , snake_case__ : Dict ):
'''simple docstring'''
lowercase :int = TFMobileBertForPreTraining(config=snake_case__ )
lowercase :Any = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowercase :List[Any] = model(snake_case__ )
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def __snake_case ( self : Optional[Any] , snake_case__ : Tuple , snake_case__ : Tuple , snake_case__ : str , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : Dict , snake_case__ : Optional[Any] ):
'''simple docstring'''
lowercase :List[Any] = self.num_labels
lowercase :List[Any] = TFMobileBertForSequenceClassification(config=snake_case__ )
lowercase :Dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowercase :List[Any] = model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __snake_case ( self : Any , snake_case__ : Optional[int] , snake_case__ : Tuple , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : List[Any] , snake_case__ : List[Any] , snake_case__ : Optional[Any] ):
'''simple docstring'''
lowercase :Tuple = self.num_choices
lowercase :Any = TFMobileBertForMultipleChoice(config=snake_case__ )
lowercase :Any = tf.tile(tf.expand_dims(snake_case__ , 1 ) , (1, self.num_choices, 1) )
lowercase :Union[str, Any] = tf.tile(tf.expand_dims(snake_case__ , 1 ) , (1, self.num_choices, 1) )
lowercase :List[Any] = tf.tile(tf.expand_dims(snake_case__ , 1 ) , (1, self.num_choices, 1) )
lowercase :Dict = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
lowercase :Optional[Any] = model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __snake_case ( self : Any , snake_case__ : str , snake_case__ : Tuple , snake_case__ : int , snake_case__ : Tuple , snake_case__ : Dict , snake_case__ : Optional[Any] , snake_case__ : Dict ):
'''simple docstring'''
lowercase :List[Any] = self.num_labels
lowercase :List[str] = TFMobileBertForTokenClassification(config=snake_case__ )
lowercase :int = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowercase :int = model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __snake_case ( self : List[str] , snake_case__ : List[Any] , snake_case__ : Optional[int] , snake_case__ : Optional[int] , snake_case__ : List[Any] , snake_case__ : Dict , snake_case__ : Dict , snake_case__ : str ):
'''simple docstring'''
lowercase :Union[str, Any] = TFMobileBertForQuestionAnswering(config=snake_case__ )
lowercase :List[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowercase :str = model(snake_case__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __snake_case ( self : List[Any] ):
'''simple docstring'''
lowercase :Dict = self.prepare_config_and_inputs()
lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase :Dict = config_and_inputs
lowercase :Optional[Any] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
def __snake_case ( self : Optional[Any] ):
'''simple docstring'''
lowercase :List[Any] = TFMobileBertModelTest.TFMobileBertModelTester(self )
lowercase :List[str] = ConfigTester(self , config_class=snake_case__ , hidden_size=3_7 )
def __snake_case ( self : List[Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __snake_case ( self : Union[str, Any] ):
'''simple docstring'''
lowercase :List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*snake_case__ )
def __snake_case ( self : Any ):
'''simple docstring'''
lowercase :List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*snake_case__ )
def __snake_case ( self : Optional[Any] ):
'''simple docstring'''
lowercase :str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*snake_case__ )
def __snake_case ( self : Union[str, Any] ):
'''simple docstring'''
lowercase :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*snake_case__ )
def __snake_case ( self : Optional[int] ):
'''simple docstring'''
lowercase :int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*snake_case__ )
def __snake_case ( self : List[str] ):
'''simple docstring'''
lowercase :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*snake_case__ )
def __snake_case ( self : List[str] ):
'''simple docstring'''
lowercase :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*snake_case__ )
def __snake_case ( self : Dict ):
'''simple docstring'''
lowercase :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*snake_case__ )
@slow
def __snake_case ( self : int ):
'''simple docstring'''
for model_name in ["google/mobilebert-uncased"]:
lowercase :List[str] = TFMobileBertModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
@require_tf
class __magic_name__ ( unittest.TestCase ):
@slow
def __snake_case ( self : Tuple ):
'''simple docstring'''
lowercase :int = TFMobileBertForPreTraining.from_pretrained('''google/mobilebert-uncased''' )
lowercase :Optional[Any] = tf.constant([[0, 1, 2, 3, 4, 5]] )
lowercase :List[Any] = model(snake_case__ )[0]
lowercase :Union[str, Any] = [1, 6, 3_0_5_2_2]
self.assertEqual(output.shape , snake_case__ )
lowercase :Optional[int] = tf.constant(
[
[
[-4.5_91_95_47, -9.24_82_95, -9.64_52_56],
[-6.7_30_61_75, -6.44_02_84, -6.6_05_28_37],
[-7.2_74_35_06, -6.7_84_79_15, -6.02_46_73],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , snake_case__ , atol=1e-4 )
| 677 | 0 |
"""simple docstring"""
def UpperCAmelCase ( _lowercase : str , _lowercase : int ) -> list:
"""simple docstring"""
lowerCAmelCase_ = a_.split()
def justify(_lowercase : list , _lowercase : int , _lowercase : int ) -> str:
lowerCAmelCase_ = max_width - width
lowerCAmelCase_ = len(a_ )
if len(a_ ) == 1:
# if there is only word in line
# just insert overall_spaces_count for the remainder of line
return line[0] + " " * overall_spaces_count
else:
lowerCAmelCase_ = words_count - 1
# num_spaces_between_words_list[i] : tells you to insert
# num_spaces_between_words_list[i] spaces
# after word on line[i]
lowerCAmelCase_ = spaces_to_insert_between_words * [
overall_spaces_count // spaces_to_insert_between_words
]
lowerCAmelCase_ = (
overall_spaces_count % spaces_to_insert_between_words
)
# distribute spaces via round robin to the left words
for i in range(a_ ):
num_spaces_between_words_list[i] += 1
lowerCAmelCase_ = []
for i in range(a_ ):
# add the word
aligned_words_list.append(line[i] )
# add the spaces to insert
aligned_words_list.append(num_spaces_between_words_list[i] * ''' ''' )
# just add the last word to the sentence
aligned_words_list.append(line[-1] )
# join the aligned words list to form a justified line
return "".join(a_ )
lowerCAmelCase_ = []
lowerCAmelCase_ = []
lowerCAmelCase_ = 0
for word in words:
if width + len(a_ ) + len(a_ ) <= max_width:
# keep adding words until we can fill out max_width
# width = sum of length of all words (without overall_spaces_count)
# len(word) = length of current word
# len(line) = number of overall_spaces_count to insert between words
line.append(a_ )
width += len(a_ )
else:
# justify the line and add it to result
answer.append(justify(a_ , a_ , a_ ) )
# reset new line and new width
lowerCAmelCase_ , lowerCAmelCase_ = [word], len(a_ )
lowerCAmelCase_ = max_width - width - len(a_ )
answer.append(''' '''.join(a_ ) + (remaining_spaces + 1) * ''' ''' )
return answer
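# Illustrative usage sketch (not from the source; `text_justification` is a
# hypothetical readable alias for the helper above):
#   >>> text_justification("This is an example of text justification.", 16)
#   ['This    is    an', 'example  of text', 'justification.  ']
# Words are packed greedily per line, leftover spaces are distributed
# round-robin from the left, and the last line is left-justified and padded.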
if __name__ == "__main__":
from doctest import testmod
testmod() | 552 |
"""simple docstring"""
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def lowerCamelCase (a_ :int) -> List[str]:
random.seed(a_)
np.random.seed(a_)
torch.manual_seed(a_)
torch.cuda.manual_seed_all(a_)
# ^^ safe to call this function even if cuda is not available
class __magic_name__ :
def __init__( self : Optional[Any] , snake_case__ : Iterable[torch.nn.Parameter] , snake_case__ : float = 0.99_99 , snake_case__ : float = 0.0 , snake_case__ : int = 0 , snake_case__ : bool = False , snake_case__ : Union[float, int] = 1.0 , snake_case__ : Union[float, int] = 2 / 3 , snake_case__ : Optional[Any] = None , snake_case__ : Dict[str, Any] = None , **snake_case__ : Tuple , ):
'''simple docstring'''
if isinstance(snake_case__ , torch.nn.Module ):
lowercase :int = (
'''Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. '''
'''Please pass the parameters of the module instead.'''
)
deprecate(
'''passing a `torch.nn.Module` to `ExponentialMovingAverage`''' , '''1.0.0''' , snake_case__ , standard_warn=snake_case__ , )
lowercase :Dict = parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
lowercase :Optional[Any] = True
if kwargs.get('''max_value''' , snake_case__ ) is not None:
lowercase :Optional[Any] = '''The `max_value` argument is deprecated. Please use `decay` instead.'''
deprecate('''max_value''' , '''1.0.0''' , snake_case__ , standard_warn=snake_case__ )
lowercase :Optional[int] = kwargs['''max_value''']
if kwargs.get('''min_value''' , snake_case__ ) is not None:
lowercase :List[Any] = '''The `min_value` argument is deprecated. Please use `min_decay` instead.'''
deprecate('''min_value''' , '''1.0.0''' , snake_case__ , standard_warn=snake_case__ )
lowercase :str = kwargs['''min_value''']
lowercase :Any = list(snake_case__ )
lowercase :Optional[Any] = [p.clone().detach() for p in parameters]
if kwargs.get('''device''' , snake_case__ ) is not None:
lowercase :str = '''The `device` argument is deprecated. Please use `to` instead.'''
deprecate('''device''' , '''1.0.0''' , snake_case__ , standard_warn=snake_case__ )
self.to(device=kwargs['''device'''] )
lowercase :int = None
lowercase :int = decay
lowercase :Union[str, Any] = min_decay
lowercase :List[Any] = update_after_step
lowercase :Union[str, Any] = use_ema_warmup
lowercase :Any = inv_gamma
lowercase :Any = power
lowercase :str = 0
lowercase :int = None # set in `step()`
lowercase :List[str] = model_cls
lowercase :Any = model_config
@classmethod
def __snake_case ( cls : int , snake_case__ : Tuple , snake_case__ : Union[str, Any] ):
'''simple docstring'''
lowercase , lowercase :int = model_cls.load_config(snake_case__ , return_unused_kwargs=snake_case__ )
lowercase :List[Any] = model_cls.from_pretrained(snake_case__ )
lowercase :Optional[int] = cls(model.parameters() , model_cls=snake_case__ , model_config=model.config )
ema_model.load_state_dict(snake_case__ )
return ema_model
def __snake_case ( self : int , snake_case__ : Union[str, Any] ):
'''simple docstring'''
if self.model_cls is None:
raise ValueError('''`save_pretrained` can only be used if `model_cls` was defined at __init__.''' )
if self.model_config is None:
raise ValueError('''`save_pretrained` can only be used if `model_config` was defined at __init__.''' )
lowercase :Dict = self.model_cls.from_config(self.model_config )
lowercase :Tuple = self.state_dict()
state_dict.pop('''shadow_params''' , snake_case__ )
model.register_to_config(**snake_case__ )
self.copy_to(model.parameters() )
model.save_pretrained(snake_case__ )
def __snake_case ( self : int , snake_case__ : int ):
'''simple docstring'''
lowercase :Union[str, Any] = max(0 , optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
if self.use_ema_warmup:
lowercase :int = 1 - (1 + step / self.inv_gamma) ** -self.power
else:
lowercase :Dict = (1 + step) / (1_0 + step)
lowercase :Optional[int] = min(snake_case__ , self.decay )
# make sure decay is not smaller than min_decay
lowercase :Optional[int] = max(snake_case__ , self.min_decay )
return cur_decay_value
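# Worked example (illustrative): without EMA warmup the schedule is
# cur_decay_value = (1 + step) / (10 + step), so at step 10 it is
# 11 / 20 = 0.55; the value is then clamped into [min_decay, decay].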
@torch.no_grad()
def __snake_case ( self : Any , snake_case__ : Iterable[torch.nn.Parameter] ):
'''simple docstring'''
if isinstance(snake_case__ , torch.nn.Module ):
lowercase :Tuple = (
'''Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. '''
'''Please pass the parameters of the module instead.'''
)
deprecate(
'''passing a `torch.nn.Module` to `ExponentialMovingAverage.step`''' , '''1.0.0''' , snake_case__ , standard_warn=snake_case__ , )
lowercase :Union[str, Any] = parameters.parameters()
lowercase :Optional[Any] = list(snake_case__ )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
lowercase :List[Any] = self.get_decay(self.optimization_step )
lowercase :Optional[Any] = decay
lowercase :List[Any] = 1 - decay
lowercase :List[str] = contextlib.nullcontext
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params , snake_case__ ):
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
lowercase :Union[str, Any] = deepspeed.zero.GatheredParameters(snake_case__ , modifier_rank=snake_case__ )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(snake_case__ )
def __snake_case ( self : str , snake_case__ : Iterable[torch.nn.Parameter] ):
'''simple docstring'''
lowercase :Optional[Any] = list(snake_case__ )
for s_param, param in zip(self.shadow_params , snake_case__ ):
param.data.copy_(s_param.to(param.device ).data )
def __snake_case ( self : Optional[int] , snake_case__ : Dict=None , snake_case__ : Dict=None ):
'''simple docstring'''
lowercase :str = [
p.to(device=snake_case__ , dtype=snake_case__ ) if p.is_floating_point() else p.to(device=snake_case__ )
for p in self.shadow_params
]
def __snake_case ( self : Dict ):
'''simple docstring'''
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def __snake_case ( self : Optional[int] , snake_case__ : Iterable[torch.nn.Parameter] ):
'''simple docstring'''
lowercase :str = [param.detach().cpu().clone() for param in parameters]
def __snake_case ( self : List[Any] , snake_case__ : Iterable[torch.nn.Parameter] ):
'''simple docstring'''
if self.temp_stored_params is None:
raise RuntimeError('''This ExponentialMovingAverage has no `store()`ed weights ''' '''to `restore()`''' )
for c_param, param in zip(self.temp_stored_params , snake_case__ ):
param.data.copy_(c_param.data )
# Better memory-wise.
lowercase :Dict = None
def __snake_case ( self : Union[str, Any] , snake_case__ : dict ):
'''simple docstring'''
lowercase :List[str] = copy.deepcopy(snake_case__ )
lowercase :Any = state_dict.get('''decay''' , self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError('''Decay must be between 0 and 1''' )
lowercase :int = state_dict.get('''min_decay''' , self.min_decay )
if not isinstance(self.min_decay , snake_case__ ):
raise ValueError('''Invalid min_decay''' )
lowercase :List[Any] = state_dict.get('''optimization_step''' , self.optimization_step )
if not isinstance(self.optimization_step , snake_case__ ):
raise ValueError('''Invalid optimization_step''' )
lowercase :int = state_dict.get('''update_after_step''' , self.update_after_step )
if not isinstance(self.update_after_step , snake_case__ ):
raise ValueError('''Invalid update_after_step''' )
lowercase :Optional[int] = state_dict.get('''use_ema_warmup''' , self.use_ema_warmup )
if not isinstance(self.use_ema_warmup , snake_case__ ):
raise ValueError('''Invalid use_ema_warmup''' )
lowercase :Any = state_dict.get('''inv_gamma''' , self.inv_gamma )
if not isinstance(self.inv_gamma , (float, int) ):
raise ValueError('''Invalid inv_gamma''' )
lowercase :Dict = state_dict.get('''power''' , self.power )
if not isinstance(self.power , (float, int) ):
raise ValueError('''Invalid power''' )
lowercase :Optional[int] = state_dict.get('''shadow_params''' , snake_case__ )
if shadow_params is not None:
lowercase :List[Any] = shadow_params
if not isinstance(self.shadow_params , snake_case__ ):
raise ValueError('''shadow_params must be a list''' )
if not all(isinstance(snake_case__ , torch.Tensor ) for p in self.shadow_params ):
raise ValueError('''shadow_params must all be Tensors''' )
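# Minimal usage sketch (assumes a torch.nn.Module `net`, an optimizer and a
# `loader`; the readable class name is illustrative, since in this file the
# class is bound to `__magic_name__`):
#   ema = ExponentialMovingAverage(net.parameters(), decay=0.9999)
#   for batch in loader:
#       loss = net(batch).mean(); loss.backward(); optimizer.step()
#       ema.step(net.parameters())      # update the shadow parameters
#   ema.store(net.parameters())         # stash the live weights
#   ema.copy_to(net.parameters())       # evaluate with the averaged weights
#   ema.restore(net.parameters())       # put the live weights back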
| 677 | 0 |
'''simple docstring'''
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def SCREAMING_SNAKE_CASE_ ( ) -> Tuple:
_SCREAMING_SNAKE_CASE = ArgumentParser(
description=(
"PyTorch TPU distributed training launch "
"helper utility that will spawn up "
"multiple distributed processes"
) )
# Optional arguments for the launch helper
parser.add_argument("--num_cores" , type=a_ , default=1 , help="Number of TPU cores to use (1 or 8)." )
# positional
parser.add_argument(
"training_script" , type=a_ , help=(
"The full path to the single TPU training "
"program/script to be launched in parallel, "
"followed by all the arguments for the "
"training script"
) , )
# rest from the training program
parser.add_argument("training_script_args" , nargs=a_ )
return parser.parse_args()
def SCREAMING_SNAKE_CASE_ ( ) -> List[str]:
_SCREAMING_SNAKE_CASE = parse_args()
# Import training_script as a module.
_SCREAMING_SNAKE_CASE = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
_SCREAMING_SNAKE_CASE = script_fpath.stem
_SCREAMING_SNAKE_CASE = importlib.import_module(a_ )
# Patch sys.argv
_SCREAMING_SNAKE_CASE = [args.training_script] + args.training_script_args + ['''--tpu_num_cores''', str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
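# Typical invocation of this launcher (script name and paths are illustrative):
#   python xla_spawn.py --num_cores 8 my_training_script.py --learning_rate 1e-4
# The launcher imports `my_training_script` as a module, appends
# `--tpu_num_cores 8` to its argv, and spawns its `_mp_fn` on every TPU core.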
| 418 |
"""simple docstring"""
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCamelCase (a_ :int , a_ :Union[str, Any] , a_ :List[Any]) -> List[str]:
return params[F"""{prefix}/{prefix}/relpos_bias/rel_embedding"""][:, i, :]
def lowerCamelCase (a_ :Optional[Any] , a_ :Optional[int] , a_ :str , a_ :Any="attention") -> Optional[int]:
lowercase :Tuple = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/key/kernel"""][:, i, :, :])
lowercase :int = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2])
lowercase :str = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/out/kernel"""][:, i, :, :])
lowercase :Any = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2])
lowercase :int = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/query/kernel"""][:, i, :, :])
lowercase :List[str] = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2])
lowercase :List[Any] = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/value/kernel"""][:, i, :, :])
lowercase :Optional[int] = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2])
return k, o, q, v
def lowerCamelCase (a_ :Any , a_ :Union[str, Any] , a_ :Union[str, Any] , a_ :Union[str, Any]=False) -> List[Any]:
if split_mlp_wi:
lowercase :List[Any] = params[F"""{prefix}/{prefix}/mlp/wi_0/kernel"""][:, i, :]
lowercase :Optional[int] = params[F"""{prefix}/{prefix}/mlp/wi_1/kernel"""][:, i, :]
lowercase :Dict = (wi_a, wi_a)
else:
lowercase :Optional[Any] = params[F"""{prefix}/{prefix}/mlp/wi/kernel"""][:, i, :]
lowercase :Union[str, Any] = params[F"""{prefix}/{prefix}/mlp/wo/kernel"""][:, i, :]
return wi, wo
def lowerCamelCase (a_ :Any , a_ :Optional[Any] , a_ :Optional[Any] , a_ :Union[str, Any]) -> Optional[Any]:
return params[F"""{prefix}/{prefix}/{layer_name}/scale"""][:, i]
def lowerCamelCase (a_ :dict , *, a_ :int , a_ :bool , a_ :bool = False) -> int:
lowercase :Dict = traverse_util.flatten_dict(variables['''target'''])
lowercase :Optional[Any] = {'''/'''.join(a_): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
lowercase :str = '''encoder/encoder/mlp/wi_0/kernel''' in old
print('''Split MLP:''' , a_)
lowercase :str = collections.OrderedDict()
# Shared embeddings.
lowercase :int = old['''token_embedder/embedding''']
# Encoder.
for i in range(a_):
# Block i, layer 0 (Self Attention).
lowercase :Union[str, Any] = tax_layer_norm_lookup(a_ , a_ , '''encoder''' , '''pre_attention_layer_norm''')
lowercase , lowercase , lowercase , lowercase :Tuple = tax_attention_lookup(a_ , a_ , '''encoder''' , '''attention''')
lowercase :Dict = layer_norm
lowercase :Dict = k.T
lowercase :Union[str, Any] = o.T
lowercase :List[Any] = q.T
lowercase :int = v.T
# Block i, layer 1 (MLP).
lowercase :Optional[int] = tax_layer_norm_lookup(a_ , a_ , '''encoder''' , '''pre_mlp_layer_norm''')
lowercase , lowercase :str = tax_mlp_lookup(a_ , a_ , '''encoder''' , a_)
lowercase :int = layer_norm
if split_mlp_wi:
lowercase :Tuple = wi[0].T
lowercase :Tuple = wi[1].T
else:
lowercase :int = wi.T
lowercase :Tuple = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
lowercase :Dict = tax_relpos_bias_lookup(
a_ , a_ , '''encoder''').T
lowercase :str = old['''encoder/encoder_norm/scale''']
if not scalable_attention:
lowercase :str = tax_relpos_bias_lookup(
a_ , 0 , '''encoder''').T
lowercase :List[Any] = tax_relpos_bias_lookup(
a_ , 0 , '''decoder''').T
if not is_encoder_only:
# Decoder.
for i in range(a_):
# Block i, layer 0 (Self Attention).
lowercase :Any = tax_layer_norm_lookup(a_ , a_ , '''decoder''' , '''pre_self_attention_layer_norm''')
lowercase , lowercase , lowercase , lowercase :str = tax_attention_lookup(a_ , a_ , '''decoder''' , '''self_attention''')
lowercase :List[str] = layer_norm
lowercase :Dict = k.T
lowercase :List[Any] = o.T
lowercase :List[Any] = q.T
lowercase :Any = v.T
# Block i, layer 1 (Cross Attention).
lowercase :Tuple = tax_layer_norm_lookup(a_ , a_ , '''decoder''' , '''pre_cross_attention_layer_norm''')
lowercase , lowercase , lowercase , lowercase :int = tax_attention_lookup(a_ , a_ , '''decoder''' , '''encoder_decoder_attention''')
lowercase :int = layer_norm
lowercase :Dict = k.T
lowercase :int = o.T
lowercase :List[Any] = q.T
lowercase :Tuple = v.T
# Block i, layer 2 (MLP).
lowercase :Any = tax_layer_norm_lookup(a_ , a_ , '''decoder''' , '''pre_mlp_layer_norm''')
lowercase , lowercase :Tuple = tax_mlp_lookup(a_ , a_ , '''decoder''' , a_)
lowercase :Any = layer_norm
if split_mlp_wi:
lowercase :int = wi[0].T
lowercase :Union[str, Any] = wi[1].T
else:
lowercase :int = wi.T
lowercase :List[Any] = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
lowercase :Union[str, Any] = tax_relpos_bias_lookup(a_ , a_ , '''decoder''').T
lowercase :Union[str, Any] = old['''decoder/decoder_norm/scale''']
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
lowercase :int = old['''decoder/logits_dense/kernel'''].T
return new
def lowerCamelCase (a_ :Dict , a_ :bool) -> Tuple:
lowercase :str = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
lowercase :Any = state_dict['''shared.weight''']
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
lowercase :Optional[Any] = state_dict['''shared.weight''']
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print('''Using shared word embeddings as lm_head.''')
lowercase :Optional[int] = state_dict['''shared.weight''']
return state_dict
def lowerCamelCase (a_ :List[str] , a_ :List[str] , a_ :Tuple , a_ :Optional[int] , a_ :List[str]) -> List[str]:
lowercase :Optional[Any] = checkpoints.load_tax_checkpoint(a_)
lowercase :Optional[int] = convert_tax_to_pytorch(
a_ , num_layers=config.num_layers , is_encoder_only=a_ , scalable_attention=a_)
lowercase :Union[str, Any] = make_state_dict(a_ , a_)
model.load_state_dict(a_ , strict=a_)
def lowerCamelCase (a_ :str , a_ :Optional[int] , a_ :Any , a_ :bool = False , a_ :bool = False , ) -> Tuple:
lowercase :Optional[int] = MTaConfig.from_json_file(a_)
print(F"""Building PyTorch model from configuration: {config}""")
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
lowercase :Union[str, Any] = UMTaEncoderModel(a_)
else:
lowercase :int = UMTaForConditionalGeneration(a_)
# Load weights from tf checkpoint
load_tax_weights_in_ta(a_ , a_ , a_ , a_ , a_)
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""")
model.save_pretrained(a_)
# Verify that we can load the checkpoint.
model.from_pretrained(a_)
print('''Done''')
if __name__ == "__main__":
UpperCAmelCase = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
)
parser.add_argument(
'''--scalable_attention''',
action='''store_true''',
help='''Whether the model uses scaled attention (umt5 model)''',
default=False,
)
UpperCAmelCase = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.tax_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
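# Example command line (script name and paths are placeholders; the flags match
# the parser above):
#   python convert_t5x_checkpoint_to_pytorch.py \
#     --t5x_checkpoint_path /path/to/t5x/checkpoint \
#     --config_file /path/to/config.json \
#     --pytorch_dump_path ./pytorch_dump \
#     --scalable_attention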
| 677 | 0 |
'''simple docstring'''
import re
def UpperCAmelCase_ ( A ):
'''simple docstring'''
if len(re.findall('[ATCG]' , a_ ) ) != len(a_ ):
raise ValueError('Invalid Strand' )
return dna.translate(dna.maketrans('ATCG' , 'TAGC' ) )
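# Doctest-style examples for the complement function above (the readable name
# `dna` is illustrative; here the function is bound to `UpperCAmelCase_`):
#   >>> dna("GCTA")
#   'CGAT'
#   >>> dna("ATGC")
#   'TACG'
#   >>> dna("GTCx")   # 'x' is not a valid base -> raises ValueError: Invalid Strand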
if __name__ == "__main__":
import doctest
doctest.testmod()
| 120 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase = {
'''configuration_blenderbot''': [
'''BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlenderbotConfig''',
'''BlenderbotOnnxConfig''',
],
'''tokenization_blenderbot''': ['''BlenderbotTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = ['''BlenderbotTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
'''BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlenderbotForCausalLM''',
'''BlenderbotForConditionalGeneration''',
'''BlenderbotModel''',
'''BlenderbotPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
'''TFBlenderbotForConditionalGeneration''',
'''TFBlenderbotModel''',
'''TFBlenderbotPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
'''FlaxBlenderbotForConditionalGeneration''',
'''FlaxBlenderbotModel''',
'''FlaxBlenderbotPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
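# Illustrative effect of the lazy module above: a plain
#   from transformers.models.blenderbot import BlenderbotConfig
# only imports configuration_blenderbot at attribute-access time, so the heavy
# torch/TF/Flax modeling files stay unimported until something requests them.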
| 677 | 0 |
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCamelCase_ :
def __init__( self , __lowerCAmelCase , __lowerCAmelCase=1_3 , __lowerCAmelCase=3_2 , __lowerCAmelCase=3 , __lowerCAmelCase=4 , __lowerCAmelCase=[1_0, 2_0, 3_0, 4_0] , __lowerCAmelCase=[2, 2, 3, 2] , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=3_7 , __lowerCAmelCase="gelu" , __lowerCAmelCase=1_0 , __lowerCAmelCase=0.02 , __lowerCAmelCase=["stage2", "stage3", "stage4"] , __lowerCAmelCase=[2, 3, 4] , __lowerCAmelCase=None , ):
"""simple docstring"""
__magic_name__ :List[str] = parent
__magic_name__ :int = batch_size
__magic_name__ :int = image_size
__magic_name__ :List[Any] = num_channels
__magic_name__ :str = num_stages
__magic_name__ :int = hidden_sizes
__magic_name__ :Tuple = depths
__magic_name__ :List[str] = is_training
__magic_name__ :Union[str, Any] = use_labels
__magic_name__ :List[str] = intermediate_size
__magic_name__ :List[Any] = hidden_act
__magic_name__ :Tuple = num_labels
__magic_name__ :List[str] = initializer_range
__magic_name__ :List[Any] = out_features
__magic_name__ :List[str] = out_indices
__magic_name__ :List[str] = scope
def A ( self ):
"""simple docstring"""
__magic_name__ :Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__magic_name__ :Optional[int] = None
if self.use_labels:
__magic_name__ :List[str] = ids_tensor([self.batch_size] , self.num_labels )
__magic_name__ :Union[str, Any] = self.get_config()
return config, pixel_values, labels
def A ( self ):
"""simple docstring"""
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=snake_case__ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :int = ConvNextVaModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
__magic_name__ :Any = model(snake_case__ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Any = ConvNextVaForImageClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
__magic_name__ :Dict = model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :List[Any] = ConvNextVaBackbone(config=snake_case__ )
model.to(snake_case__ )
model.eval()
__magic_name__ :Any = model(snake_case__ )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
__magic_name__ :Union[str, Any] = None
__magic_name__ :Union[str, Any] = ConvNextVaBackbone(config=snake_case__ )
model.to(snake_case__ )
model.eval()
__magic_name__ :Optional[int] = model(snake_case__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
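# Concrete numbers for this tester (derived from the defaults above):
# hidden_sizes=[10, 20, 30, 40] with out_features=["stage2", "stage3", "stage4"]
# yields backbone channels [20, 30, 40]; with out_features=None the backbone
# falls back to the last stage only, i.e. channels [40].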
def A ( self ):
"""simple docstring"""
__magic_name__ :Dict = self.prepare_config_and_inputs()
__magic_name__ :List[str] = config_and_inputs
__magic_name__ :str = {'''pixel_values''': pixel_values}
return config, inputs_dict
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[int] = self.prepare_config_and_inputs()
__magic_name__ :Dict = config_and_inputs
__magic_name__ :Dict = {'''pixel_values''': pixel_values, '''labels''': labels}
return config, inputs_dict
@require_torch
class lowerCamelCase_ ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
a__ = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
a__ = (
{"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
a__ = False
a__ = False
a__ = False
a__ = False
a__ = False
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[int] = ConvNextVaModelTester(self )
__magic_name__ :List[str] = ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ , hidden_size=3_7 )
def A ( self ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A ( self ):
"""simple docstring"""
return
@unittest.skip(reason='''ConvNextV2 does not use inputs_embeds''' )
def A ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='''ConvNextV2 does not support input and output embeddings''' )
def A ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='''ConvNextV2 does not use feedforward chunking''' )
def A ( self ):
"""simple docstring"""
pass
def A ( self ):
"""simple docstring"""
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
__magic_name__ :Any = self.model_tester.prepare_config_and_inputs_with_labels()
__magic_name__ :List[Any] = True
if model_class.__name__ in [
*get_values(snake_case__ ),
*get_values(snake_case__ ),
]:
continue
__magic_name__ :Any = model_class(snake_case__ )
model.to(snake_case__ )
model.train()
__magic_name__ :Optional[int] = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
__magic_name__ :Union[str, Any] = model(**snake_case__ ).loss
loss.backward()
def A ( self ):
"""simple docstring"""
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
__magic_name__ :Tuple = self.model_tester.prepare_config_and_inputs_with_labels()
__magic_name__ :Any = False
__magic_name__ :List[str] = True
if (
model_class.__name__
in [*get_values(snake_case__ ), *get_values(snake_case__ )]
or not model_class.supports_gradient_checkpointing
):
continue
__magic_name__ :List[Any] = model_class(snake_case__ )
model.to(snake_case__ )
model.gradient_checkpointing_enable()
model.train()
__magic_name__ :Tuple = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
__magic_name__ :int = model(**snake_case__ ).loss
loss.backward()
def A ( self ):
"""simple docstring"""
__magic_name__ :int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ :List[Any] = model_class(snake_case__ )
__magic_name__ :Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__magic_name__ :Tuple = [*signature.parameters.keys()]
__magic_name__ :Optional[int] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , snake_case__ )
def A ( self ):
"""simple docstring"""
__magic_name__ :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def A ( self ):
"""simple docstring"""
def check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
__magic_name__ :Optional[int] = model_class(snake_case__ )
model.to(snake_case__ )
model.eval()
with torch.no_grad():
__magic_name__ :List[Any] = model(**self._prepare_for_class(snake_case__ , snake_case__ ) )
__magic_name__ :Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__magic_name__ :List[Any] = self.model_tester.num_stages
self.assertEqual(len(snake_case__ ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
__magic_name__ :Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ :List[str] = True
check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__magic_name__ :List[Any] = True
check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ )
def A ( self ):
"""simple docstring"""
__magic_name__ :List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case__ )
@slow
def A ( self ):
"""simple docstring"""
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__ :str = ConvNextVaModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
def __lowercase ( ):
"""simple docstring"""
__magic_name__ :Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class lowerCamelCase_ ( unittest.TestCase ):
@cached_property
def A ( self ):
"""simple docstring"""
return AutoImageProcessor.from_pretrained('''facebook/convnextv2-tiny-1k-224''' ) if is_vision_available() else None
@slow
def A ( self ):
"""simple docstring"""
__magic_name__ :Any = ConvNextVaForImageClassification.from_pretrained('''facebook/convnextv2-tiny-1k-224''' ).to(snake_case__ )
__magic_name__ :Tuple = self.default_image_processor
__magic_name__ :Optional[int] = prepare_img()
__magic_name__ :List[Any] = preprocessor(images=snake_case__ , return_tensors='''pt''' ).to(snake_case__ )
# forward pass
with torch.no_grad():
__magic_name__ :Optional[int] = model(**snake_case__ )
# verify the logits
__magic_name__ :Dict = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , snake_case__ )
__magic_name__ :Union[str, Any] = torch.tensor([0.9996, 0.1966, -0.4386] ).to(snake_case__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case__ , atol=1E-4 ) )
| 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {
'''naver-clova-ix/donut-base''': '''https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json''',
# See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class __magic_name__ ( __UpperCAmelCase ):
__A : Tuple = "donut-swin"
__A : Optional[Any] = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self : List[str] , snake_case__ : Any=2_2_4 , snake_case__ : Tuple=4 , snake_case__ : str=3 , snake_case__ : Dict=9_6 , snake_case__ : Optional[Any]=[2, 2, 6, 2] , snake_case__ : Any=[3, 6, 1_2, 2_4] , snake_case__ : List[str]=7 , snake_case__ : Dict=4.0 , snake_case__ : str=True , snake_case__ : Optional[int]=0.0 , snake_case__ : Tuple=0.0 , snake_case__ : Any=0.1 , snake_case__ : List[str]="gelu" , snake_case__ : Tuple=False , snake_case__ : int=0.02 , snake_case__ : Optional[Any]=1e-5 , **snake_case__ : Any , ):
'''simple docstring'''
super().__init__(**snake_case__ )
lowercase :Union[str, Any] = image_size
lowercase :Optional[Any] = patch_size
lowercase :List[str] = num_channels
lowercase :Optional[int] = embed_dim
lowercase :Optional[Any] = depths
lowercase :List[Any] = len(snake_case__ )
lowercase :Optional[Any] = num_heads
lowercase :int = window_size
lowercase :str = mlp_ratio
lowercase :Optional[int] = qkv_bias
lowercase :Dict = hidden_dropout_prob
lowercase :Any = attention_probs_dropout_prob
lowercase :Any = drop_path_rate
lowercase :int = hidden_act
lowercase :int = use_absolute_embeddings
lowercase :List[str] = layer_norm_eps
lowercase :Union[str, Any] = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowercase :str = int(embed_dim * 2 ** (len(snake_case__ ) - 1) )
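# Worked example with the defaults above: embed_dim=96 and depths=[2, 2, 6, 2]
# give hidden_size = 96 * 2 ** (4 - 1) = 768, the channel width produced by the
# final Swin stage.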
| 677 | 0 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
snake_case = logging.get_logger(__name__)
snake_case = {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
"allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
),
}
class __A ( __UpperCAmelCase ):
'''simple docstring'''
a_ = "longformer"
def __init__( self , _snake_case = 512 , _snake_case = 2 , _snake_case = 1 , _snake_case = 0 , _snake_case = 2 , _snake_case = 3_0522 , _snake_case = 768 , _snake_case = 12 , _snake_case = 12 , _snake_case = 3072 , _snake_case = "gelu" , _snake_case = 0.1 , _snake_case = 0.1 , _snake_case = 512 , _snake_case = 2 , _snake_case = 0.02 , _snake_case = 1E-1_2 , _snake_case = False , **_snake_case , ):
super().__init__(pad_token_id=snake_case__ , **snake_case__ )
_lowerCAmelCase : Any = attention_window
_lowerCAmelCase : Union[str, Any] = sep_token_id
_lowerCAmelCase : List[Any] = bos_token_id
_lowerCAmelCase : int = eos_token_id
_lowerCAmelCase : str = vocab_size
_lowerCAmelCase : List[Any] = hidden_size
_lowerCAmelCase : List[Any] = num_hidden_layers
_lowerCAmelCase : List[Any] = num_attention_heads
_lowerCAmelCase : str = hidden_act
_lowerCAmelCase : Dict = intermediate_size
_lowerCAmelCase : List[Any] = hidden_dropout_prob
_lowerCAmelCase : Dict = attention_probs_dropout_prob
_lowerCAmelCase : List[Any] = max_position_embeddings
_lowerCAmelCase : Tuple = type_vocab_size
_lowerCAmelCase : Union[str, Any] = initializer_range
_lowerCAmelCase : Union[str, Any] = layer_norm_eps
_lowerCAmelCase : List[Any] = onnx_export
class __A ( __UpperCAmelCase ):
'''simple docstring'''
def __init__( self , _snake_case , _snake_case = "default" , _snake_case = None ):
super().__init__(snake_case__ , snake_case__ , snake_case__ )
_lowerCAmelCase : Optional[Any] = True
@property
def SCREAMING_SNAKE_CASE__ ( self ):
if self.task == "multiple-choice":
_lowerCAmelCase : List[Any] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
_lowerCAmelCase : Union[str, Any] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("global_attention_mask", dynamic_axis),
] )
@property
def SCREAMING_SNAKE_CASE__ ( self ):
_lowerCAmelCase : Dict = super().outputs
if self.task == "default":
_lowerCAmelCase : int = {0: '''batch'''}
return outputs
@property
def SCREAMING_SNAKE_CASE__ ( self ):
return 1E-4
@property
def SCREAMING_SNAKE_CASE__ ( self ):
return max(super().default_onnx_opset , 14 )
def SCREAMING_SNAKE_CASE__ ( self , _snake_case , _snake_case = -1 , _snake_case = -1 , _snake_case = False , _snake_case = None , ):
_lowerCAmelCase : Optional[int] = super().generate_dummy_inputs(
preprocessor=snake_case__ , batch_size=snake_case__ , seq_length=snake_case__ , is_pair=snake_case__ , framework=snake_case__ )
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
_lowerCAmelCase : Optional[int] = torch.zeros_like(inputs["input_ids"] )
# make every second token global
_lowerCAmelCase : Union[str, Any] = 1
return inputs
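# Note: "make every second token global" is a strided write upstream, e.g.
#   inputs["global_attention_mask"][:, ::2] = 1
# (illustrative reconstruction); the bare assignment above is a collapsed
# rendering of that subscripted update.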
| 424 |
"""simple docstring"""
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
UpperCAmelCase = version.parse(version.parse(torch.__version__).base_version) < version.parse('''1.11''')
def lowerCamelCase (a_ :Optional[int] , a_ :tuple , a_ :Path , a_ :str , a_ :int , a_ :List[Any] , a_ :Any , a_ :Union[str, Any]=False , ) -> Dict:
output_path.parent.mkdir(parents=a_ , exist_ok=a_)
# PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
# so we check the torch version for backwards compatibility
if is_torch_less_than_1_11:
export(
a_ , a_ , f=output_path.as_posix() , input_names=a_ , output_names=a_ , dynamic_axes=a_ , do_constant_folding=a_ , use_external_data_format=a_ , enable_onnx_checker=a_ , opset_version=a_ , )
else:
export(
a_ , a_ , f=output_path.as_posix() , input_names=a_ , output_names=a_ , dynamic_axes=a_ , do_constant_folding=a_ , opset_version=a_ , )
@torch.no_grad()
def lowerCamelCase (a_ :str , a_ :str , a_ :int , a_ :bool = False) -> Union[str, Any]:
lowercase :Any = torch.floataa if fpaa else torch.floataa
if fpaa and torch.cuda.is_available():
lowercase :Union[str, Any] = '''cuda'''
elif fpaa and not torch.cuda.is_available():
raise ValueError('''`float16` model export is only supported on GPUs with CUDA''')
else:
lowercase :List[str] = '''cpu'''
lowercase :List[str] = StableDiffusionPipeline.from_pretrained(a_ , torch_dtype=a_).to(a_)
lowercase :List[Any] = Path(a_)
# TEXT ENCODER
lowercase :List[Any] = pipeline.text_encoder.config.max_position_embeddings
lowercase :Dict = pipeline.text_encoder.config.hidden_size
lowercase :Union[str, Any] = pipeline.tokenizer(
'''A sample prompt''' , padding='''max_length''' , max_length=pipeline.tokenizer.model_max_length , truncation=a_ , return_tensors='''pt''' , )
onnx_export(
pipeline.text_encoder , model_args=(text_input.input_ids.to(device=a_ , dtype=torch.intaa)) , output_path=output_path / '''text_encoder''' / '''model.onnx''' , ordered_input_names=['''input_ids'''] , output_names=['''last_hidden_state''', '''pooler_output'''] , dynamic_axes={
'''input_ids''': {0: '''batch''', 1: '''sequence'''},
} , opset=a_ , )
del pipeline.text_encoder
# UNET
lowercase :Any = pipeline.unet.config.in_channels
lowercase :List[Any] = pipeline.unet.config.sample_size
lowercase :Optional[int] = output_path / '''unet''' / '''model.onnx'''
onnx_export(
pipeline.unet , model_args=(
torch.randn(2 , a_ , a_ , a_).to(device=a_ , dtype=a_),
torch.randn(2).to(device=a_ , dtype=a_),
torch.randn(2 , a_ , a_).to(device=a_ , dtype=a_),
False,
) , output_path=a_ , ordered_input_names=['''sample''', '''timestep''', '''encoder_hidden_states''', '''return_dict'''] , output_names=['''out_sample'''] , dynamic_axes={
'''sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
'''timestep''': {0: '''batch'''},
'''encoder_hidden_states''': {0: '''batch''', 1: '''sequence'''},
} , opset=a_ , use_external_data_format=a_ , )
lowercase :List[Any] = str(unet_path.absolute().as_posix())
lowercase :str = os.path.dirname(a_)
lowercase :Optional[Any] = onnx.load(a_)
# clean up existing tensor files
shutil.rmtree(a_)
os.mkdir(a_)
# collate external tensor files into one
onnx.save_model(
a_ , a_ , save_as_external_data=a_ , all_tensors_to_one_file=a_ , location='''weights.pb''' , convert_attribute=a_ , )
del pipeline.unet
# VAE ENCODER
lowercase :Tuple = pipeline.vae
lowercase :Optional[Any] = vae_encoder.config.in_channels
lowercase :Any = vae_encoder.config.sample_size
# need to get the raw tensor output (sample) from the encoder
lowercase :Any = lambda a_ , a_: vae_encoder.encode(a_ , a_)[0].sample()
onnx_export(
a_ , model_args=(
torch.randn(1 , a_ , a_ , a_).to(device=a_ , dtype=a_),
False,
) , output_path=output_path / '''vae_encoder''' / '''model.onnx''' , ordered_input_names=['''sample''', '''return_dict'''] , output_names=['''latent_sample'''] , dynamic_axes={
'''sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
} , opset=a_ , )
# VAE DECODER
lowercase :Any = pipeline.vae
lowercase :Dict = vae_decoder.config.latent_channels
lowercase :Union[str, Any] = vae_decoder.config.out_channels
# forward only through the decoder part
lowercase :List[Any] = vae_encoder.decode
onnx_export(
a_ , model_args=(
torch.randn(1 , a_ , a_ , a_).to(device=a_ , dtype=a_),
False,
) , output_path=output_path / '''vae_decoder''' / '''model.onnx''' , ordered_input_names=['''latent_sample''', '''return_dict'''] , output_names=['''sample'''] , dynamic_axes={
'''latent_sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
} , opset=a_ , )
del pipeline.vae
# SAFETY CHECKER
if pipeline.safety_checker is not None:
lowercase :Dict = pipeline.safety_checker
lowercase :str = safety_checker.config.vision_config.num_channels
lowercase :str = safety_checker.config.vision_config.image_size
lowercase :List[str] = safety_checker.forward_onnx
onnx_export(
pipeline.safety_checker , model_args=(
torch.randn(
1 , a_ , a_ , a_ , ).to(device=a_ , dtype=a_),
torch.randn(1 , a_ , a_ , a_).to(device=a_ , dtype=a_),
) , output_path=output_path / '''safety_checker''' / '''model.onnx''' , ordered_input_names=['''clip_input''', '''images'''] , output_names=['''out_images''', '''has_nsfw_concepts'''] , dynamic_axes={
'''clip_input''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
'''images''': {0: '''batch''', 1: '''height''', 2: '''width''', 3: '''channels'''},
} , opset=a_ , )
del pipeline.safety_checker
lowercase :Tuple = OnnxRuntimeModel.from_pretrained(output_path / '''safety_checker''')
lowercase :Optional[Any] = pipeline.feature_extractor
else:
lowercase :int = None
lowercase :Union[str, Any] = None
lowercase :Optional[int] = OnnxStableDiffusionPipeline(
vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / '''vae_encoder''') , vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / '''vae_decoder''') , text_encoder=OnnxRuntimeModel.from_pretrained(output_path / '''text_encoder''') , tokenizer=pipeline.tokenizer , unet=OnnxRuntimeModel.from_pretrained(output_path / '''unet''') , scheduler=pipeline.scheduler , safety_checker=a_ , feature_extractor=a_ , requires_safety_checker=safety_checker is not None , )
onnx_pipeline.save_pretrained(a_)
print('''ONNX pipeline saved to''' , a_)
del pipeline
del onnx_pipeline
lowercase :Tuple = OnnxStableDiffusionPipeline.from_pretrained(a_ , provider='''CPUExecutionProvider''')
print('''ONNX pipeline is loadable''')
if __name__ == "__main__":
UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'''--model_path''',
type=str,
required=True,
help='''Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).''',
)
parser.add_argument('''--output_path''', type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--opset''',
default=14,
type=int,
help='''The version of the ONNX operator set to use.''',
)
parser.add_argument('''--fp16''', action='''store_true''', default=False, help='''Export the models in `float16` mode''')
UpperCAmelCase = parser.parse_args()
convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
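# Example invocation (script name, model id and paths are placeholders; the
# flags match the parser above):
#   python convert_stable_diffusion_to_onnx.py \
#     --model_path CompVis/stable-diffusion-v1-4 \
#     --output_path ./sd_onnx --opset 14 --fp16
# Note that --fp16 export requires a CUDA device, as checked above.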
| 677 | 0 |
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class _snake_case :
def __init__( self , a__ , a__=13 , a__=10 , a__=3 , a__=2 , a__=2 , a__=True , a__=True , a__=32 , a__=5 , a__=4 , a__=37 , a__="gelu" , a__=0.1 , a__=0.1 , a__=10 , a__=0.0_2 , a__="divided_space_time" , a__=None , ) -> Tuple:
'''simple docstring'''
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = image_size
snake_case_ = num_channels
snake_case_ = patch_size
snake_case_ = num_frames
snake_case_ = is_training
snake_case_ = use_labels
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = attention_type
snake_case_ = initializer_range
snake_case_ = scope
snake_case_ = num_labels
# in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
snake_case_ = (image_size // patch_size) ** 2
snake_case_ = (num_frames) * self.num_patches_per_frame + 1
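# e.g. with the defaults above: image_size=10 and patch_size=2 give
# (10 // 2) ** 2 = 25 patches per frame, and num_frames=2 gives
# seq_length = 2 * 25 + 1 = 51 (the +1 is the CLS token).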
def lowerCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
snake_case_ = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
snake_case_ = None
if self.use_labels:
snake_case_ = ids_tensor([self.batch_size] , self.num_labels )
snake_case_ = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
snake_case_ = TimesformerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
snake_case_ = self.num_labels
return config
def lowerCAmelCase__ ( self , a__ , a__ , a__ ) -> Tuple:
'''simple docstring'''
snake_case_ = TimesformerModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
snake_case_ = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase__ ( self , a__ , a__ , a__ ) -> Optional[Any]:
'''simple docstring'''
snake_case_ = TimesformerForVideoClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
snake_case_ = model(snake_case__ )
# verify the logits shape
snake_case_ = torch.Size((self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape , snake_case__ )
def lowerCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ = self.prepare_config_and_inputs()
snake_case_ = config_and_inputs
snake_case_ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class _snake_case ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
lowerCAmelCase_ : Optional[int] = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
lowerCAmelCase_ : Dict = (
{"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification}
if is_torch_available()
else {}
)
lowerCAmelCase_ : Optional[Any] = False
lowerCAmelCase_ : Dict = False
lowerCAmelCase_ : int = False
lowerCAmelCase_ : Optional[int] = False
def lowerCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ = TimesformerModelTester(self )
snake_case_ = ConfigTester(
self , config_class=snake_case__ , has_text_modality=snake_case__ , hidden_size=37 )
def lowerCAmelCase__ ( self , a__ , a__ , a__=False ) -> Dict:
'''simple docstring'''
snake_case_ = copy.deepcopy(snake_case__ )
if return_labels:
if model_class in get_values(snake_case__ ):
snake_case_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=snake_case__ )
return inputs_dict
def lowerCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="TimeSformer does not use inputs_embeds" )
def lowerCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = model_class(snake_case__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
snake_case_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case__ , nn.Linear ) )
def lowerCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = model_class(snake_case__ )
snake_case_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ = [*signature.parameters.keys()]
snake_case_ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , snake_case__ )
def lowerCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def lowerCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*snake_case__ )
@slow
def lowerCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = TimesformerModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
def lowerCAmelCase__ ( self ) -> Any:
'''simple docstring'''
if not self.has_attentions:
pass
else:
snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = True
for model_class in self.all_model_classes:
snake_case_ = self.model_tester.seq_length
snake_case_ = self.model_tester.num_frames
snake_case_ = True
snake_case_ = False
snake_case_ = True
snake_case_ = model_class(snake_case__ )
model.to(snake_case__ )
model.eval()
with torch.no_grad():
snake_case_ = model(**self._prepare_for_class(snake_case__ , snake_case__ ) )
snake_case_ = outputs.attentions
self.assertEqual(len(snake_case__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
snake_case_ = True
snake_case_ = model_class(snake_case__ )
model.to(snake_case__ )
model.eval()
with torch.no_grad():
snake_case_ = model(**self._prepare_for_class(snake_case__ , snake_case__ ) )
snake_case_ = outputs.attentions
self.assertEqual(len(snake_case__ ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
snake_case_ = len(snake_case__ )
# Check attention is always last and order is fine
snake_case_ = True
snake_case_ = True
snake_case_ = model_class(snake_case__ )
model.to(snake_case__ )
model.eval()
with torch.no_grad():
snake_case_ = model(**self._prepare_for_class(snake_case__ , snake_case__ ) )
self.assertEqual(out_len + 1 , len(snake_case__ ) )
snake_case_ = outputs.attentions
self.assertEqual(len(snake_case__ ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
def lowerCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
def check_hidden_states_output(a__ , a__ , a__ ):
snake_case_ = model_class(snake_case__ )
model.to(snake_case__ )
model.eval()
with torch.no_grad():
snake_case_ = model(**self._prepare_for_class(snake_case__ , snake_case__ ) )
snake_case_ = outputs.hidden_states
snake_case_ = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(snake_case__ ) , snake_case__ )
snake_case_ = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = True
check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case_ = True
check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ )
def UpperCamelCase_( ):
'''simple docstring'''
snake_case_ = hf_hub_download(
repo_id="hf-internal-testing/spaghetti-video" , filename="eating_spaghetti.npy" , repo_type="dataset" )
snake_case_ = np.load(a_ )
return list(a_ )
@require_torch
@require_vision
class _snake_case ( unittest.TestCase ):
@cached_property
def lowerCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def lowerCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
snake_case_ = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400" ).to(
snake_case__ )
snake_case_ = self.default_image_processor
snake_case_ = prepare_video()
snake_case_ = image_processor(video[:8] , return_tensors="pt" ).to(snake_case__ )
# forward pass
with torch.no_grad():
snake_case_ = model(**snake_case__ )
# verify the logits
snake_case_ = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape , snake_case__ )
snake_case_ = torch.tensor([-0.3_0_1_6, -0.7_7_1_3, -0.4_2_0_5] ).to(snake_case__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case__ , atol=1e-4 ) )
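The spatial/temporal token bookkeeping in the tester above reduces to simple arithmetic. A minimal sketch with assumed values (224-pixel frames, 16-pixel patches, 8 frames; these are illustrative, not the tester's defaults):

```python
# Token count for a TimeSformer-style video transformer: spatial patches per
# frame, times frames, plus one CLS token. Values here are assumptions.
image_size, patch_size, num_frames = 224, 16, 8
num_patches_per_frame = (image_size // patch_size) ** 2  # 14 * 14 = 196
seq_length = num_frames * num_patches_per_frame + 1      # 1569
print(num_patches_per_frame, seq_length)
```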
| 400 |
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )
def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Remove columns that are populated exclusively by pad_token_id"""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class Seq2SeqDataset(Dataset):
    """Reads line-aligned .source/.target files and tokenizes them on the fly."""
    def __init__(self, tokenizer, data_dir, max_source_length, max_target_length, type_path="train", n_obs=None, src_lang=None, tgt_lang=None, prefix="", ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
    def __len__(self):
        return len(self.src_lens)
    def __getitem__(self, index) -> Dict[str, torch.Tensor]:
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"
        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token
        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")
        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }
    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]
    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)
def flatten_list(summary_ids: List[List]) -> List:
    return list(itertools.chain.from_iterable(summary_ids))
def save_git_info(folder_path: str) -> None:
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))
def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)
def load_json(path):
    with open(path) as f:
        return json.load(f)
def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos
def lmap(f: Callable, x: Iterable) -> List:
    return list(map(f, x))
def pickle_save(obj, path):
    with open(path, "wb") as f:
        return pickle.dump(obj, f)
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""
    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)
    def white_space_fix(text):
        return " ".join(text.split())
    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)
    def lower(text):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(s))))
def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)
def calculate_exact_match(output_lns, reference_lns):
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}
def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")
def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
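To make the token-level F1 above concrete, here is the same computation done by hand on one made-up prediction/reference pair:

```python
from collections import Counter

prediction = "the cat sat".split()
reference = "cat sat down".split()
common = Counter(prediction) & Counter(reference)
num_same = sum(common.values())          # 2 ('cat' and 'sat')
precision = num_same / len(prediction)   # 2/3
recall = num_same / len(reference)       # 2/3
print((2 * precision * recall) / (precision + recall))  # 0.666...
```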
| 677 | 0 |
'''simple docstring'''
def fizz_buzz(number: int, iterations: int) -> str:
    if not isinstance(iterations, int):
        raise ValueError("iterations must be defined as integers")
    if not isinstance(number, int) or not number >= 1:
        raise ValueError("starting number must be an integer and be more than 0")
    if not iterations >= 1:
        raise ValueError("Iterations must be done more than 0 times to play FizzBuzz")
    out = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)
        # print(out)
        number += 1
        out += " "
    return out
if __name__ == "__main__":
import doctest
doctest.testmod()
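A quick check of the fixed function (note that every entry is followed by a trailing space):

```python
print(fizz_buzz(1, 15))
# 1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz 11 Fizz 13 14 FizzBuzz
```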
| 263 |
"""simple docstring"""
def move_tower(height, from_pole, to_pole, with_pole):
    """Recursively move `height` disks from one pole to another, using the third as scratch space."""
    if height >= 1:
        move_tower(height - 1, from_pole, with_pole, to_pole)
        move_disk(from_pole, to_pole)
        move_tower(height - 1, with_pole, to_pole, from_pole)
def move_disk(fp, tp):
    print("moving disk from", fp, "to", tp)
def main():
    height = int(input("Height of hanoi: ").strip())
    move_tower(height, "A", "B", "C")
if __name__ == "__main__":
main()
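For a tower of height 2, the recursion above emits exactly three moves:

```python
move_tower(2, "A", "B", "C")
# moving disk from A to C
# moving disk from A to B
# moving disk from C to B
```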
| 677 | 0 |
"""simple docstring"""
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    """simple docstring"""
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)
t_start = time.time()
ds = load_dataset(args.dataset_name, split="""train""")
print(F"""Dataset loaded in {time.time()-t_start:.2f}s""")
t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
"""repo_name""",
"""path""",
"""copies""",
"""size""",
"""content""",
"""license""",
"""hash""",
"""line_mean""",
"""line_max""",
"""alpha_frac""",
"""autogenerated""",
],
)
print(F"""Dataset tokenized in {time.time()-t_start:.2f}s""")
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F"""Data pushed to the hub in {time.time()-t_start:.2f}s""")
| 19 |
"""simple docstring"""
from sklearn.metrics import mean_squared_error
import datasets
_CITATION = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
_DESCRIPTION = '''\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
"raw_values" : Returns a full set of errors in case of multioutput input.
"uniform_average" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric("mse")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{\'mse\': 0.6123724356957945}
If you\'re using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric("mse", "multilist")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{\'mse\': array([0.41666667, 1. ])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mse(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
            ],
        )
    def _get_feature_types(self):
        if self.config_name == "multilist":
            return {
                "predictions": datasets.Sequence(datasets.Value("float")),
                "references": datasets.Sequence(datasets.Value("float")),
            }
        else:
            return {
                "predictions": datasets.Value("float"),
                "references": datasets.Value("float"),
            }
    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )
        return {"mse": mse}
| 677 | 0 |
"""simple docstring"""
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize('''repo_id''' , ['''canonical_dataset_name''', '''org-name/dataset-name'''] )
@pytest.mark.parametrize('''path''' , ['''filename.csv''', '''filename with blanks.csv'''] )
@pytest.mark.parametrize('''revision''' , [None, '''v2'''] )
def test_hf_hub_url(repo_id, path, revision):
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"
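One concrete instance of the parametrized assertion above, spelled out:

```python
from urllib.parse import quote

repo_id, path, revision = "org-name/dataset-name", "filename with blanks.csv", None
url = f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"
print(url)
# https://huggingface.co/datasets/org-name/dataset-name/resolve/main/filename%20with%20blanks.csv
```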
| 586 |
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class __magic_name__ ( __UpperCAmelCase ):
@staticmethod
@abstractmethod
def __snake_case ( snake_case__ : ArgumentParser ):
'''simple docstring'''
raise NotImplementedError()
@abstractmethod
def __snake_case ( self : Optional[Any] ):
'''simple docstring'''
raise NotImplementedError()
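A hypothetical concrete subclass showing how the two abstract hooks are normally filled in; the names below are illustrative, since the snippet above mangles both method names into the same identifier:

```python
from argparse import ArgumentParser

class EnvCommand:
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        # Declare this command's CLI options on the shared parser.
        parser.add_argument("--verbose", action="store_true")

    def run(self):
        print("collecting environment info")
```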
| 677 | 0 |
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
"""stable diffusion controlnet""",
"""0.22.0""",
"""Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.""",
standard_warn=False,
stacklevel=3,
)
| 204 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'''configuration_encodec''': [
'''ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EncodecConfig''',
],
'''feature_extraction_encodec''': ['''EncodecFeatureExtractor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encodec"] = [
'''ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EncodecModel''',
'''EncodecPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
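The `_LazyModule` indirection defers the heavy submodule imports until an attribute is first accessed. A rough, simplified illustration of the idea (not the real implementation):

```python
import importlib
import types

class TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        # Import the owning submodule only when one of its names is requested.
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(attr)
```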
| 677 | 0 |
class CircularQueue:
    """Circular FIFO queue with a fixed capacity, backed by a plain list."""
    def __init__(self, n: int):
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0
    def __len__(self) -> int:
        return self.size
    def is_empty(self) -> bool:
        return self.size == 0
    def first(self):
        return False if self.is_empty() else self.array[self.front]
    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception("""QUEUE IS FULL""")
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self
    def dequeue(self):
        if self.size == 0:
            raise Exception("""UNDERFLOW""")
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
| 45 |
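A usage sketch for the `CircularQueue` class in the snippet above:

```python
q = CircularQueue(2)
q.enqueue("a").enqueue("b")   # enqueue returns self, so calls chain
print(len(q), q.first())      # 2 a
print(q.dequeue())            # a
print(q.dequeue())            # b
print(q.is_empty())           # True
```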
"""simple docstring"""
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __magic_name__ ( unittest.TestCase ):
def __init__( self : List[Any] , snake_case__ : Optional[int] , snake_case__ : List[str]=3 , snake_case__ : int=3_2 , snake_case__ : int=3 , snake_case__ : str=1_0 , snake_case__ : str=[1_0, 2_0, 3_0, 4_0] , snake_case__ : int=[1, 1, 2, 1] , snake_case__ : List[Any]=True , snake_case__ : Tuple=True , snake_case__ : Optional[Any]="relu" , snake_case__ : Optional[int]=3 , snake_case__ : Optional[Any]=None , ):
'''simple docstring'''
lowercase :Union[str, Any] = parent
lowercase :Optional[Any] = batch_size
lowercase :Dict = image_size
lowercase :Any = num_channels
lowercase :List[str] = embeddings_size
lowercase :Union[str, Any] = hidden_sizes
lowercase :Any = depths
lowercase :Dict = is_training
lowercase :Any = use_labels
lowercase :Any = hidden_act
lowercase :List[str] = num_labels
lowercase :List[Any] = scope
lowercase :int = len(snake_case__ )
def __snake_case ( self : Any ):
'''simple docstring'''
lowercase :Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase :Union[str, Any] = self.get_config()
return config, pixel_values
def __snake_case ( self : Dict ):
'''simple docstring'''
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def __snake_case ( self : str , snake_case__ : Tuple , snake_case__ : List[Any] ):
'''simple docstring'''
lowercase :Any = FlaxRegNetModel(config=snake_case__ )
lowercase :str = model(snake_case__ )
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def __snake_case ( self : List[str] , snake_case__ : Optional[int] , snake_case__ : str ):
'''simple docstring'''
lowercase :Tuple = self.num_labels
lowercase :str = FlaxRegNetForImageClassification(config=snake_case__ )
lowercase :Union[str, Any] = model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __snake_case ( self : str ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_flax
class __magic_name__ ( __UpperCAmelCase , unittest.TestCase ):
__A : List[Any] = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
__A : str = False
__A : Tuple = False
__A : Dict = False
def __snake_case ( self : Optional[Any] ):
'''simple docstring'''
lowercase :Dict = FlaxRegNetModelTester(self )
lowercase :Tuple = ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ )
def __snake_case ( self : Union[str, Any] ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __snake_case ( self : List[Any] ):
'''simple docstring'''
return
def __snake_case ( self : str ):
'''simple docstring'''
lowercase :Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def __snake_case ( self : List[str] ):
'''simple docstring'''
lowercase :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case__ )
@unittest.skip(reason='''RegNet does not use inputs_embeds''' )
def __snake_case ( self : Tuple ):
'''simple docstring'''
pass
@unittest.skip(reason='''RegNet does not support input and output embeddings''' )
def __snake_case ( self : List[Any] ):
'''simple docstring'''
pass
def __snake_case ( self : List[Any] ):
'''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase :Union[str, Any] = model_class(snake_case__ )
lowercase :int = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase :Tuple = [*signature.parameters.keys()]
lowercase :Tuple = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , snake_case__ )
def __snake_case ( self : Tuple ):
'''simple docstring'''
def check_hidden_states_output(snake_case__ : Union[str, Any] , snake_case__ : Optional[Any] , snake_case__ : Optional[int] ):
lowercase :int = model_class(snake_case__ )
lowercase :Tuple = model(**self._prepare_for_class(snake_case__ , snake_case__ ) )
lowercase :Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowercase :Dict = self.model_tester.num_stages
self.assertEqual(len(snake_case__ ) , expected_num_stages + 1 )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase :Optional[int] = True
check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase :str = True
check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ )
def __snake_case ( self : List[Any] ):
'''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowercase :Optional[Any] = self._prepare_for_class(snake_case__ , snake_case__ )
lowercase :List[Any] = model_class(snake_case__ )
@jax.jit
def model_jitted(snake_case__ : str , **snake_case__ : Optional[int] ):
return model(pixel_values=snake_case__ , **snake_case__ )
with self.subTest('''JIT Enabled''' ):
lowercase :Optional[int] = model_jitted(**snake_case__ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
lowercase :Optional[int] = model_jitted(**snake_case__ ).to_tuple()
self.assertEqual(len(snake_case__ ) , len(snake_case__ ) )
for jitted_output, output in zip(snake_case__ , snake_case__ ):
self.assertEqual(jitted_output.shape , output.shape )
def lowerCamelCase () -> Tuple:
lowercase :Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
return image
@require_flax
class __magic_name__ ( unittest.TestCase ):
@cached_property
def __snake_case ( self : int ):
'''simple docstring'''
return AutoImageProcessor.from_pretrained('''facebook/regnet-y-040''' ) if is_vision_available() else None
@slow
def __snake_case ( self : List[str] ):
'''simple docstring'''
lowercase :int = FlaxRegNetForImageClassification.from_pretrained('''facebook/regnet-y-040''' )
lowercase :Optional[Any] = self.default_image_processor
lowercase :Dict = prepare_img()
lowercase :Any = image_processor(images=snake_case__ , return_tensors='''np''' )
lowercase :List[str] = model(**snake_case__ )
# verify the logits
lowercase :Any = (1, 1_0_0_0)
self.assertEqual(outputs.logits.shape , snake_case__ )
lowercase :List[Any] = jnp.array([-0.41_80, -1.50_51, -3.48_36] )
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , snake_case__ , atol=1e-4 ) )
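The JIT-enabled versus JIT-disabled comparison pattern used above is easy to see on a toy function; a minimal sketch:

```python
import jax
import jax.numpy as jnp

@jax.jit
def f(x):
    return (x * 2.0).sum()

x = jnp.arange(4.0)
jitted = f(x)
with jax.disable_jit():
    eager = f(x)
assert jitted.shape == eager.shape  # same result, two execution paths
```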
| 677 | 0 |
"""simple docstring"""
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
lowercase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
class __a ( __UpperCAmelCase ):
def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ):
'''simple docstring'''
super().__init__()
if safety_checker is None:
logger.warning(
F"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
''' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'''
''' results in services or applications open to the public. Both the diffusers team and Hugging Face'''
''' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'''
''' it only for use-cases that involve analyzing network behavior or auditing its results. For more'''
''' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .''' )
self.register_modules(
speech_model=snake_case__ , speech_processor=snake_case__ , vae=snake_case__ , text_encoder=snake_case__ , tokenizer=snake_case__ , unet=snake_case__ , scheduler=snake_case__ , feature_extractor=snake_case__ , )
def lowerCamelCase_ ( self , UpperCAmelCase = "auto" ):
'''simple docstring'''
if slice_size == "auto":
lowerCAmelCase_ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(snake_case__ )
def lowerCamelCase_ ( self ):
'''simple docstring'''
self.enable_attention_slicing(snake_case__ )
@torch.no_grad()
def __call__( self , UpperCAmelCase , UpperCAmelCase=1_6000 , UpperCAmelCase = 512 , UpperCAmelCase = 512 , UpperCAmelCase = 50 , UpperCAmelCase = 7.5 , UpperCAmelCase = None , UpperCAmelCase = 1 , UpperCAmelCase = 0.0 , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = "pil" , UpperCAmelCase = True , UpperCAmelCase = None , UpperCAmelCase = 1 , **UpperCAmelCase , ):
'''simple docstring'''
lowerCAmelCase_ = self.speech_processor.feature_extractor(
snake_case__ , return_tensors='''pt''' , sampling_rate=snake_case__ ).input_features.to(self.device )
lowerCAmelCase_ = self.speech_model.generate(snake_case__ , max_length=48_0000 )
lowerCAmelCase_ = self.speech_processor.tokenizer.batch_decode(snake_case__ , skip_special_tokens=snake_case__ , normalize=snake_case__ )[
0
]
if isinstance(snake_case__ , snake_case__ ):
lowerCAmelCase_ = 1
elif isinstance(snake_case__ , snake_case__ ):
lowerCAmelCase_ = len(snake_case__ )
else:
raise ValueError(F"""`prompt` has to be of type `str` or `list` but is {type(snake_case__ )}""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(snake_case__ , snake_case__ ) or callback_steps <= 0)
):
raise ValueError(
F"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
F""" {type(snake_case__ )}.""" )
# get prompt text embeddings
lowerCAmelCase_ = self.tokenizer(
snake_case__ , padding='''max_length''' , max_length=self.tokenizer.model_max_length , return_tensors='''pt''' , )
lowerCAmelCase_ = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
lowerCAmelCase_ = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'''The following part of your input was truncated because CLIP can only handle sequences up to'''
F""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
lowerCAmelCase_ = text_input_ids[:, : self.tokenizer.model_max_length]
lowerCAmelCase_ = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
lowerCAmelCase_ = text_embeddings.shape
lowerCAmelCase_ = text_embeddings.repeat(1 , snake_case__ , 1 )
lowerCAmelCase_ = text_embeddings.view(bs_embed * num_images_per_prompt , snake_case__ , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
lowerCAmelCase_ = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
lowerCAmelCase_ = 42
if negative_prompt is None:
lowerCAmelCase_ = [''''''] * batch_size
elif type(snake_case__ ) is not type(snake_case__ ):
raise TypeError(
F"""`negative_prompt` should be the same type to `prompt`, but got {type(snake_case__ )} !="""
F""" {type(snake_case__ )}.""" )
elif isinstance(snake_case__ , snake_case__ ):
lowerCAmelCase_ = [negative_prompt]
elif batch_size != len(snake_case__ ):
raise ValueError(
F"""`negative_prompt`: {negative_prompt} has batch size {len(snake_case__ )}, but `prompt`:"""
F""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
''' the batch size of `prompt`.''' )
else:
lowerCAmelCase_ = negative_prompt
lowerCAmelCase_ = text_input_ids.shape[-1]
lowerCAmelCase_ = self.tokenizer(
snake_case__ , padding='''max_length''' , max_length=snake_case__ , truncation=snake_case__ , return_tensors='''pt''' , )
lowerCAmelCase_ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowerCAmelCase_ = uncond_embeddings.shape[1]
lowerCAmelCase_ = uncond_embeddings.repeat(1 , snake_case__ , 1 )
lowerCAmelCase_ = uncond_embeddings.view(batch_size * num_images_per_prompt , snake_case__ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowerCAmelCase_ = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
lowerCAmelCase_ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
lowerCAmelCase_ = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
lowerCAmelCase_ = torch.randn(snake_case__ , generator=snake_case__ , device='''cpu''' , dtype=snake_case__ ).to(
self.device )
else:
lowerCAmelCase_ = torch.randn(snake_case__ , generator=snake_case__ , device=self.device , dtype=snake_case__ )
else:
if latents.shape != latents_shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
lowerCAmelCase_ = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(snake_case__ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
lowerCAmelCase_ = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
lowerCAmelCase_ = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowerCAmelCase_ = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowerCAmelCase_ = {}
if accepts_eta:
lowerCAmelCase_ = eta
for i, t in enumerate(self.progress_bar(snake_case__ ) ):
# expand the latents if we are doing classifier free guidance
lowerCAmelCase_ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCAmelCase_ = self.scheduler.scale_model_input(snake_case__ , snake_case__ )
# predict the noise residual
lowerCAmelCase_ = self.unet(snake_case__ , snake_case__ , encoder_hidden_states=snake_case__ ).sample
# perform guidance
if do_classifier_free_guidance:
lowerCAmelCase_ = noise_pred.chunk(2 )
lowerCAmelCase_ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
lowerCAmelCase_ = self.scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(snake_case__ , snake_case__ , snake_case__ )
lowerCAmelCase_ = 1 / 0.1_8_2_1_5 * latents
lowerCAmelCase_ = self.vae.decode(snake_case__ ).sample
lowerCAmelCase_ = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowerCAmelCase_ = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowerCAmelCase_ = self.numpy_to_pil(snake_case__ )
if not return_dict:
return image
        return StableDiffusionPipelineOutput(images=snake_case__ , nsfw_content_detected=snake_case__ )
| 552 |
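Isolating the classifier-free guidance update from the denoising loop of the pipeline above, with toy tensors and an assumed guidance scale:

```python
import torch

guidance_scale = 7.5
noise_pred_uncond = torch.zeros(1, 4, 8, 8)
noise_pred_text = torch.ones(1, 4, 8, 8)
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
print(noise_pred.mean().item())  # 7.5
```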
"""simple docstring"""
demo_graph = {
'''A''': ['''B''', '''C''', '''E'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F''', '''G'''],
'''D''': ['''B'''],
'''E''': ['''A''', '''B''', '''D'''],
'''F''': ['''C'''],
'''G''': ['''C'''],
}
def bfs_shortest_path(graph: dict, start, goal) -> list[str]:
    """Find the shortest path between `start` and `goal` nodes via breadth-first search."""
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]
    # return path if start is goal
    if start == goal:
        return [start]
    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path
            # mark node as explored
            explored.add(node)
    # in case there's no path between the 2 nodes
    return []
def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    """Find the shortest-path distance (number of edges) between `start` and `target`."""
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = {start}
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, '''G''', '''D''')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, '''G''', '''D''')) # returns 4
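Both helpers can be sanity-checked against `demo_graph`:

```python
assert bfs_shortest_path(demo_graph, "G", "D") == ["G", "C", "A", "B", "D"]
assert bfs_shortest_path_distance(demo_graph, "G", "D") == 4
assert bfs_shortest_path(demo_graph, "A", "A") == ["A"]   # start == goal short-circuits
assert bfs_shortest_path_distance(demo_graph, "A", "A") == 0
```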
| 677 | 0 |
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
lowerCamelCase_ = 'docs/source/en/_toctree.yml'
def clean_doc_toc(doc_list):
    """Cleans one section of the table of content by removing duplicates and sorting entries alphabetically."""
    counts = defaultdict(int)
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1
        if doc["title"].lower() == "overview":
            overview_doc.append({"local": doc["local"], "title": doc["title"]})
        else:
            new_doc_list.append(doc)
    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]
    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"""{duplicate_key} is present several times in the documentation table of content at """
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others." )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})
    # Add none duplicate-keys
    new_doc.extend([doc for doc in doc_list if "local" not in counts or counts[doc["local"]] == 1])
    new_doc = sorted(new_doc, key=lambda s: s["title"].lower())
    # "overview" gets special treatment and is always first
    if len(overview_doc) > 1:
        raise ValueError(f"{doc_list} has two 'overview' docs which is not allowed.")
    overview_doc.extend(new_doc)
    # Sort
    return overview_doc
def SCREAMING_SNAKE_CASE_ ( __A : List[Any]=False ) -> Optional[int]:
with open(a_ , encoding="utf-8" ) as f:
_SCREAMING_SNAKE_CASE = yaml.safe_load(f.read() )
# Get to the API doc
_SCREAMING_SNAKE_CASE = 0
while content[api_idx]["title"] != "API":
api_idx += 1
_SCREAMING_SNAKE_CASE = content[api_idx]['''sections''']
# Then to the model doc
_SCREAMING_SNAKE_CASE = 0
while api_doc[scheduler_idx]["title"] != "Schedulers":
scheduler_idx += 1
_SCREAMING_SNAKE_CASE = api_doc[scheduler_idx]['''sections''']
_SCREAMING_SNAKE_CASE = clean_doc_toc(a_ )
_SCREAMING_SNAKE_CASE = False
if new_scheduler_doc != scheduler_doc:
_SCREAMING_SNAKE_CASE = True
if overwrite:
_SCREAMING_SNAKE_CASE = new_scheduler_doc
if diff:
if overwrite:
_SCREAMING_SNAKE_CASE = api_doc
with open(a_ , "w" , encoding="utf-8" ) as f:
f.write(yaml.dump(a_ , allow_unicode=a_ ) )
else:
raise ValueError(
"The model doc part of the table of content is not properly sorted, run `make style` to fix this." )
def SCREAMING_SNAKE_CASE_ ( __A : Optional[Any]=False ) -> int:
with open(a_ , encoding="utf-8" ) as f:
_SCREAMING_SNAKE_CASE = yaml.safe_load(f.read() )
# Get to the API doc
_SCREAMING_SNAKE_CASE = 0
while content[api_idx]["title"] != "API":
api_idx += 1
_SCREAMING_SNAKE_CASE = content[api_idx]['''sections''']
# Then to the model doc
_SCREAMING_SNAKE_CASE = 0
while api_doc[pipeline_idx]["title"] != "Pipelines":
pipeline_idx += 1
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = api_doc[pipeline_idx]['''sections''']
_SCREAMING_SNAKE_CASE = []
# sort sub pipeline docs
for pipeline_doc in pipeline_docs:
if "section" in pipeline_doc:
_SCREAMING_SNAKE_CASE = pipeline_doc['''section''']
_SCREAMING_SNAKE_CASE = clean_doc_toc(a_ )
if overwrite:
_SCREAMING_SNAKE_CASE = new_sub_pipeline_doc
new_pipeline_docs.append(a_ )
# sort overall pipeline doc
_SCREAMING_SNAKE_CASE = clean_doc_toc(a_ )
if new_pipeline_docs != pipeline_docs:
_SCREAMING_SNAKE_CASE = True
if overwrite:
_SCREAMING_SNAKE_CASE = new_pipeline_docs
if diff:
if overwrite:
_SCREAMING_SNAKE_CASE = api_doc
with open(a_ , "w" , encoding="utf-8" ) as f:
f.write(yaml.dump(a_ , allow_unicode=a_ ) )
else:
raise ValueError(
"The model doc part of the table of content is not properly sorted, run `make style` to fix this." )
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
lowerCamelCase_ = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
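The case-insensitive alphabetical ordering enforced by `clean_doc_toc`, shown on a toy list:

```python
doc_list = [{"local": "b", "title": "Zeta"}, {"local": "a", "title": "alpha"}]
print(sorted(doc_list, key=lambda s: s["title"].lower()))
# [{'local': 'a', 'title': 'alpha'}, {'local': 'b', 'title': 'Zeta'}]
```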
| 418 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = '''cuda''' if torch.cuda.is_available() else '''cpu'''
def split_text(text: str, n=100, character=" ") -> List[str]:
    """Split the text every ``n``-th occurrence of ``character``"""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]
def split_documents(documents: dict) -> dict:
    """Split documents into passages"""
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}
def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    """Compute the DPR embeddings of document passages"""
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt"
    )["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def lowerCamelCase (a_ :"RagExampleArguments" , a_ :"ProcessingArguments" , a_ :"IndexHnswArguments" , ) -> Any:
######################################
logger.info('''Step 1 - Create the dataset''')
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
lowercase :List[Any] = load_dataset(
'''csv''' , data_files=[rag_example_args.csv_path] , split='''train''' , delimiter='''\t''' , column_names=['''title''', '''text'''])
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
lowercase :Optional[Any] = dataset.map(a_ , batched=a_ , num_proc=processing_args.num_proc)
# And compute the embeddings
lowercase :str = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=a_)
lowercase :Dict = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
lowercase :str = Features(
{'''text''': Value('''string'''), '''title''': Value('''string'''), '''embeddings''': Sequence(Value('''float32'''))}) # optional, save as float32 instead of float64 to save space
lowercase :Optional[Any] = dataset.map(
partial(a_ , ctx_encoder=a_ , ctx_tokenizer=a_) , batched=a_ , batch_size=processing_args.batch_size , features=a_ , )
# And finally save your dataset
lowercase :str = os.path.join(rag_example_args.output_dir , '''my_knowledge_dataset''')
dataset.save_to_disk(a_)
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info('''Step 2 - Index the dataset''')
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
lowercase :str = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT)
dataset.add_faiss_index('''embeddings''' , custom_index=a_)
# And save the index
lowercase :Optional[Any] = os.path.join(rag_example_args.output_dir , '''my_knowledge_dataset_hnsw_index.faiss''')
dataset.get_index('''embeddings''').save(a_)
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class __magic_name__ :
__A : str = field(
default=str(Path(__UpperCAmelCase ).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv" ) , metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"} , )
__A : Optional[str] = field(
default=__UpperCAmelCase , metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."} , )
__A : str = field(
default="facebook/rag-sequence-nq" , metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"} , )
__A : str = field(
default="facebook/dpr-ctx_encoder-multiset-base" , metadata={
"help": (
"The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
" 'facebook/dpr-ctx_encoder-multiset-base'"
)
} , )
__A : Optional[str] = field(
default=str(Path(__UpperCAmelCase ).parent / "test_run" / "dummy-kb" ) , metadata={"help": "Path to a directory where the dataset passages and the index will be saved"} , )
@dataclass
class __magic_name__ :
__A : Optional[int] = field(
default=__UpperCAmelCase , metadata={
"help": "The number of processes to use to split the documents into passages. Default is single process."
} , )
__A : int = field(
default=16 , metadata={
"help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
} , )
@dataclass
class __magic_name__ :
__A : int = field(
default=7_68 , metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."} , )
__A : int = field(
default=1_28 , metadata={
"help": (
"The number of bi-directional links created for every new element during the HNSW index construction."
)
} , )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
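`split_text` in miniature, without the surrounding script:

```python
text = "one two three four five"
n, character = 2, " "
words = text.split(character)
chunks = [character.join(words[i : i + n]).strip() for i in range(0, len(words), n)]
print(chunks)  # ['one two', 'three four', 'five']
```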
| 677 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_conditional_detr": [
"CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ConditionalDetrConfig",
"ConditionalDetrOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_conditional_detr"] = ["ConditionalDetrFeatureExtractor"]
    _import_structure["image_processing_conditional_detr"] = ["ConditionalDetrImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_conditional_detr"] = [
"CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConditionalDetrForObjectDetection",
"ConditionalDetrForSegmentation",
"ConditionalDetrModel",
"ConditionalDetrPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 120 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
'''configuration_longt5''': ['''LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LongT5Config''', '''LongT5OnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_longt5"] = [
'''LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LongT5EncoderModel''',
'''LongT5ForConditionalGeneration''',
'''LongT5Model''',
'''LongT5PreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_longt5"] = [
'''FlaxLongT5ForConditionalGeneration''',
'''FlaxLongT5Model''',
'''FlaxLongT5PreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 677 | 0 |
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
SCREAMING_SNAKE_CASE__ : List[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Dict = {"""vocab_file""": """vocab.txt"""}
SCREAMING_SNAKE_CASE__ : List[str] = {
"""vocab_file""": {
"""facebook/esm2_t6_8M_UR50D""": """https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt""",
"""facebook/esm2_t12_35M_UR50D""": """https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt""",
},
}
SCREAMING_SNAKE_CASE__ : Any = {
"""facebook/esm2_t6_8M_UR50D""": 10_24,
"""facebook/esm2_t12_35M_UR50D""": 10_24,
}
def load_vocab_file( vocab_file ):
    """simple docstring"""
    with open(vocab_file, '''r''' ) as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]
class lowerCamelCase_ ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file , unk_token="<unk>" , cls_token="<cls>" , pad_token="<pad>" , mask_token="<mask>" , eos_token="<eos>" , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )
        self.all_tokens = load_vocab_file(vocab_file )
        self._id_to_token = dict(enumerate(self.all_tokens ) )
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens )}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens )
    def _convert_id_to_token( self , index ):
        """simple docstring"""
        return self._id_to_token.get(index , self.unk_token )
    def _convert_token_to_id( self , token ):
        """simple docstring"""
        return self._token_to_id.get(token , self._token_to_id.get(self.unk_token ) )
    def _tokenize( self , text , **kwargs ):
        """simple docstring"""
        return text.split()
    def get_vocab_size( self , with_added_tokens=False ):
        """simple docstring"""
        return len(self._id_to_token )
    def get_vocab( self ):
        """simple docstring"""
        return {token: i for i, token in enumerate(self.all_tokens )}
    def token_to_id( self , token ):
        """simple docstring"""
        return self._token_to_id.get(token , self._token_to_id.get(self.unk_token ) )
    def id_to_token( self , index ):
        """simple docstring"""
        return self._id_to_token.get(index , self.unk_token )
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ):
        """simple docstring"""
        cls = [self.cls_token_id]
        sep = [self.eos_token_id] # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError('''Cannot tokenize multiple sequences when EOS token is not set!''' )
        return cls + token_ids_0 + sep + token_ids_1 + sep # Multiple inputs always have an EOS token
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
        """simple docstring"""
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    '''You should not supply a second sequence if the provided sequence of '''
                    '''ids is already formatted with special tokens for the model.''' )
            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0 )) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1 ) + [1]
        return mask
    def save_vocabulary( self , save_directory , filename_prefix ):
        """simple docstring"""
        vocab_file = os.path.join(save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + '''vocab.txt''' )
        with open(vocab_file , '''w''' ) as f:
            f.write('''\n'''.join(self.all_tokens ) )
        return (vocab_file,)
    @property
    def vocab_size( self ):
        """simple docstring"""
        return self.get_vocab_size(with_added_tokens=False )
    def _add_tokens( self , new_tokens , special_tokens=False ):
        """simple docstring"""
        return super()._add_tokens(new_tokens , special_tokens=special_tokens )
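# Illustrative sketch (hypothetical usage, not from the snippet above):
# standalone demo of the vocab handling the tokenizer above relies on — one
# token per line in vocab.txt, ids assigned by line order, whitespace
# tokenization, and <cls> ... <eos> framing. The toy vocabulary below is made
# up for the example; it is not the real ESM vocabulary.
import os
import tempfile

toy_vocab = ["<cls>", "<pad>", "<eos>", "<unk>", "L", "A", "G", "<mask>"]
with tempfile.TemporaryDirectory() as tmp:
    demo_vocab_file = os.path.join(tmp, "vocab.txt")
    with open(demo_vocab_file, "w") as f:
        f.write("\n".join(toy_vocab))

    # Same logic as load_vocab_file() above.
    with open(demo_vocab_file, "r") as f:
        all_tokens = [l.strip() for l in f.read().splitlines()]

    token_to_id = {tok: ind for ind, tok in enumerate(all_tokens)}
    sequence = "L A G"  # tokens separated by whitespace, as _tokenize() expects
    ids = (
        [token_to_id["<cls>"]]
        + [token_to_id.get(t, token_to_id["<unk>"]) for t in sequence.split()]
        + [token_to_id["<eos>"]]
    )
    print(ids)  # -> [0, 4, 5, 6, 2]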
| 0 |
"""simple docstring"""
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
UpperCAmelCase = logging.get_logger(__name__)
class Conversation :
    def __init__( self , text : str = None , conversation_id : uuid.UUID = None , past_user_inputs=None , generated_responses=None ):
        '''simple docstring'''
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []
        self.uuid: uuid.UUID = conversation_id
        self.past_user_inputs: List[str] = past_user_inputs
        self.generated_responses: List[str] = generated_responses
        self.new_user_input: Optional[str] = text
    def __eq__( self , other ):
        '''simple docstring'''
        if not isinstance(other , Conversation ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
    def add_user_input( self , text : str , overwrite : bool = False ):
        '''simple docstring'''
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f"""User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten """
                    f"""with: \"{text}\".""" )
                self.new_user_input = text
            else:
                logger.warning(
                    f"""User input added while unprocessed input was existing: \"{self.new_user_input}\" new input """
                    f"""ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input""" )
        else:
            self.new_user_input = text
    def mark_processed( self ):
        '''simple docstring'''
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input )
        self.new_user_input = None
    def append_response( self , response : str ):
        '''simple docstring'''
        self.generated_responses.append(response )
    def iter_texts( self ):
'''simple docstring'''
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self : Dict ):
'''simple docstring'''
        output = f"""Conversation id: {self.uuid} \n"""
        for is_user, text in self.iter_texts():
            name = '''user''' if is_user else '''bot'''
output += f"""{name} >> {text} \n"""
return output
@add_end_docstrings(
    PIPELINE_INIT_ARGS , R"\n    min_length_for_response (`int`, *optional*, defaults to 32):\n        The minimum length (in number of tokens) for a response.\n    minimum_tokens (`int`, *optional*, defaults to 10):\n        The minimum length of tokens to leave for a response.\n" , )
class ConversationalPipeline ( Pipeline ):
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        super().__init__(*args , **kwargs )
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token
    def _sanitize_parameters( self , min_length_for_response=None , minimum_tokens=None , clean_up_tokenization_spaces=None , **generate_kwargs ):
        '''simple docstring'''
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}
        if min_length_for_response is not None:
            preprocess_params['''min_length_for_response'''] = min_length_for_response
        if minimum_tokens is not None:
            forward_params['''minimum_tokens'''] = minimum_tokens
        if "max_length" in generate_kwargs:
            forward_params['''max_length'''] = generate_kwargs['''max_length''']
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params['''clean_up_tokenization_spaces'''] = clean_up_tokenization_spaces
        if generate_kwargs:
            forward_params.update(generate_kwargs )
        return preprocess_params, forward_params, postprocess_params
    def __call__( self , conversations : Union[Conversation, List[Conversation]] , num_workers : int = 0 , **kwargs ):
        '''simple docstring'''
        outputs = super().__call__(conversations , num_workers=num_workers , **kwargs )
        if isinstance(outputs , list ) and len(outputs ) == 1:
            return outputs[0]
        return outputs
    def preprocess( self , conversation : Conversation , min_length_for_response=3_2 ):
        '''simple docstring'''
        if not isinstance(conversation , Conversation ):
            raise ValueError('''ConversationalPipeline, expects Conversation as inputs''' )
        if conversation.new_user_input is None:
            raise ValueError(
                f"""Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. """
                '''Add user inputs with the conversation\'s `add_user_input` method''' )
        if hasattr(self.tokenizer , '''_build_conversation_input_ids''' ):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation )
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation )
        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids] )
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids] )
        return {"input_ids": input_ids, "conversation": conversation}
    def _forward( self , model_inputs , minimum_tokens=1_0 , **generate_kwargs ):
        '''simple docstring'''
        max_length = generate_kwargs.get('''max_length''' , self.model.config.max_length )
        n = model_inputs['''input_ids'''].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f"""Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})""" )
            trim = max_length - minimum_tokens
            model_inputs['''input_ids'''] = model_inputs['''input_ids'''][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs['''attention_mask'''] = model_inputs['''attention_mask'''][:, -trim:]
        conversation = model_inputs.pop('''conversation''' )
        generate_kwargs['''max_length'''] = max_length
        output_ids = self.model.generate(**model_inputs , **generate_kwargs )
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
    def postprocess( self , model_outputs , clean_up_tokenization_spaces=True ):
        '''simple docstring'''
        output_ids = model_outputs['''output_ids''']
        answer = self.tokenizer.decode(
            output_ids[0] , skip_special_tokens=True , clean_up_tokenization_spaces=clean_up_tokenization_spaces , )
        conversation = model_outputs['''conversation''']
        conversation.mark_processed()
        conversation.append_response(answer )
        return conversation
    def _legacy_parse_and_tokenize( self , conversation : Conversation ):
        '''simple docstring'''
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text , add_special_tokens=False ) + [eos_token_id] )
            else:
                input_ids.extend(self.tokenizer.encode(text , add_special_tokens=False ) )
        if len(input_ids ) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
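# Illustrative sketch (hypothetical usage, not from the snippet above): how the
# Conversation / ConversationalPipeline pair is typically driven. Assumes a
# transformers version that still ships the conversational pipeline;
# "microsoft/DialoGPT-small" is one plausible conversational checkpoint, and
# running this downloads its weights.
from transformers import Conversation, pipeline

chatbot = pipeline("conversational", model="microsoft/DialoGPT-small")
conversation = Conversation("Hi, can you recommend a book?")
conversation = chatbot(conversation)               # generates and appends a bot reply
print(conversation.generated_responses[-1])

conversation.add_user_input("Something shorter, please.")  # queue the next turn
conversation = chatbot(conversation)
print(conversation)                                 # __repr__ prints the transcript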
| 677 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
snake_case = logging.get_logger(__name__)
snake_case = {
"microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}
class ResNetConfig ( BackboneConfigMixin , PretrainedConfig ):
    '''simple docstring'''
    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]
    def __init__( self , num_channels=3 , embedding_size=64 , hidden_sizes=[256, 512, 1024, 2048] , depths=[3, 4, 6, 3] , layer_type="bottleneck" , hidden_act="relu" , downsample_in_first_stage=False , out_features=None , out_indices=None , **kwargs , ):
        super().__init__(**kwargs )
        if layer_type not in self.layer_types:
            raise ValueError(F"""layer_type={layer_type} is not one of {','.join(self.layer_types )}""" )
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ['''stem'''] + [F"""stage{idx}""" for idx in range(1 , len(depths ) + 1 )]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
class ResNetOnnxConfig ( OnnxConfig ):
    '''simple docstring'''
    torch_onnx_minimum_version = version.parse('''1.11''' )
    @property
    def inputs( self ):
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ] )
    @property
    def atol_for_validation( self ):
        return 1E-3
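# Illustrative sketch (hypothetical usage, not from the snippet above): build a
# config with explicit stage depths, then read the ONNX export metadata off it.
# Assumes the `transformers` package is installed so the imported base classes
# resolve.
if __name__ == "__main__":
    demo_config = ResNetConfig(layer_type="basic", depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512])
    demo_onnx = ResNetOnnxConfig(demo_config)
    print(demo_config.stage_names)        # ['stem', 'stage1', ..., 'stage4']
    print(demo_onnx.inputs)               # OrderedDict with the 'pixel_values' axes
    print(demo_onnx.atol_for_validation)  # 0.001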
| 424 |
"""simple docstring"""
def lowerCamelCase (a_ :int = 100) -> int:
    collect_powers = set()
    current_pow = 0
    max_limit = a_ + 1 # maximum limit
    for a in range(2 , max_limit):
        for b in range(2 , max_limit):
            current_pow = a**b # calculates the current power
            collect_powers.add(current_pow) # adds the result to the set
    return len(collect_powers)
if __name__ == "__main__":
    print('''Number of terms ''', lowerCamelCase(int(str(input()).strip())))
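# Illustrative check (hypothetical, not from the snippet above): with n = 5
# there are 16 pairs (a, b) with 2 <= a, b <= 5, but only 15 distinct values of
# a**b, because 2**4 == 4**2 == 16.
assert lowerCamelCase(5) == 15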
| 677 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__)
def create_rename_keys( config , vqa_model=False , nlvr_model=False , irtr_model=False ):
'''simple docstring'''
snake_case_ = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'transformer.blocks.{i}.norm1.weight', f'vilt.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'transformer.blocks.{i}.norm1.bias', f'vilt.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(f'transformer.blocks.{i}.attn.proj.weight', f'vilt.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append(
(f'transformer.blocks.{i}.attn.proj.bias', f'vilt.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((f'transformer.blocks.{i}.norm2.weight', f'vilt.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'transformer.blocks.{i}.norm2.bias', f'vilt.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append(
(f'transformer.blocks.{i}.mlp.fc1.weight', f'vilt.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((f'transformer.blocks.{i}.mlp.fc1.bias', f'vilt.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((f'transformer.blocks.{i}.mlp.fc2.weight', f'vilt.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'transformer.blocks.{i}.mlp.fc2.bias', f'vilt.encoder.layer.{i}.output.dense.bias') )
# embeddings
rename_keys.extend(
[
# text embeddings
("text_embeddings.word_embeddings.weight", "vilt.embeddings.text_embeddings.word_embeddings.weight"),
(
"text_embeddings.position_embeddings.weight",
"vilt.embeddings.text_embeddings.position_embeddings.weight",
),
("text_embeddings.position_ids", "vilt.embeddings.text_embeddings.position_ids"),
(
"text_embeddings.token_type_embeddings.weight",
"vilt.embeddings.text_embeddings.token_type_embeddings.weight",
),
("text_embeddings.LayerNorm.weight", "vilt.embeddings.text_embeddings.LayerNorm.weight"),
("text_embeddings.LayerNorm.bias", "vilt.embeddings.text_embeddings.LayerNorm.bias"),
# patch embeddings
("transformer.cls_token", "vilt.embeddings.cls_token"),
("transformer.patch_embed.proj.weight", "vilt.embeddings.patch_embeddings.projection.weight"),
("transformer.patch_embed.proj.bias", "vilt.embeddings.patch_embeddings.projection.bias"),
("transformer.pos_embed", "vilt.embeddings.position_embeddings"),
# token type embeddings
("token_type_embeddings.weight", "vilt.embeddings.token_type_embeddings.weight"),
] )
# final layernorm + pooler
rename_keys.extend(
[
("transformer.norm.weight", "vilt.layernorm.weight"),
("transformer.norm.bias", "vilt.layernorm.bias"),
("pooler.dense.weight", "vilt.pooler.dense.weight"),
("pooler.dense.bias", "vilt.pooler.dense.bias"),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
("vqa_classifier.0.weight", "classifier.0.weight"),
("vqa_classifier.0.bias", "classifier.0.bias"),
("vqa_classifier.1.weight", "classifier.1.weight"),
("vqa_classifier.1.bias", "classifier.1.bias"),
("vqa_classifier.3.weight", "classifier.3.weight"),
("vqa_classifier.3.bias", "classifier.3.bias"),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
("nlvr2_classifier.0.weight", "classifier.0.weight"),
("nlvr2_classifier.0.bias", "classifier.0.bias"),
("nlvr2_classifier.1.weight", "classifier.1.weight"),
("nlvr2_classifier.1.bias", "classifier.1.bias"),
("nlvr2_classifier.3.weight", "classifier.3.weight"),
("nlvr2_classifier.3.bias", "classifier.3.bias"),
] )
else:
pass
return rename_keys
def read_in_q_k_v( state_dict , config ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
snake_case_ = '''vilt.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
snake_case_ = state_dict.pop(f'transformer.blocks.{i}.attn.qkv.weight' )
snake_case_ = state_dict.pop(f'transformer.blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
snake_case_ = in_proj_weight[
: config.hidden_size, :
]
snake_case_ = in_proj_bias[: config.hidden_size]
snake_case_ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
snake_case_ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
snake_case_ = in_proj_weight[
-config.hidden_size :, :
]
snake_case_ = in_proj_bias[-config.hidden_size :]
def remove_classification_head_( state_dict ):
    '''simple docstring'''
    ignore_keys = ['''head.weight''', '''head.bias''']
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key( dct , old , new ):
    '''simple docstring'''
    val = dct.pop(old )
    dct[new] = val
@torch.no_grad()
def convert_vilt_checkpoint( checkpoint_url , pytorch_dump_folder_path ):
'''simple docstring'''
snake_case_ = ViltConfig(image_size=3_8_4 , patch_size=3_2 , tie_word_embeddings=a_ )
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
if "vqa" in checkpoint_url:
snake_case_ = True
snake_case_ = 3_1_2_9
snake_case_ = '''huggingface/label-files'''
snake_case_ = '''vqa2-id2label.json'''
snake_case_ = json.load(open(hf_hub_download(a_ , a_ , repo_type="dataset" ) , "r" ) )
snake_case_ = {int(a_ ): v for k, v in idalabel.items()}
snake_case_ = idalabel
snake_case_ = {v: k for k, v in idalabel.items()}
snake_case_ = ViltForQuestionAnswering(a_ )
elif "nlvr" in checkpoint_url:
snake_case_ = True
snake_case_ = 2
snake_case_ = {0: '''False''', 1: '''True'''}
snake_case_ = {v: k for k, v in config.idalabel.items()}
snake_case_ = 3
snake_case_ = ViltForImagesAndTextClassification(a_ )
elif "irtr" in checkpoint_url:
snake_case_ = True
snake_case_ = ViltForImageAndTextRetrieval(a_ )
elif "mlm_itm" in checkpoint_url:
snake_case_ = True
snake_case_ = ViltForMaskedLM(a_ )
else:
raise ValueError("Unknown model type" )
# load state_dict of original model, remove and rename some keys
snake_case_ = torch.hub.load_state_dict_from_url(a_ , map_location="cpu" )['''state_dict''']
snake_case_ = create_rename_keys(a_ , a_ , a_ , a_ )
for src, dest in rename_keys:
rename_key(a_ , a_ , a_ )
read_in_q_k_v(a_ , a_ )
if mlm_model or irtr_model:
snake_case_ = ['''itm_score.fc.weight''', '''itm_score.fc.bias''']
for k in ignore_keys:
state_dict.pop(a_ , a_ )
# load state dict into HuggingFace model
model.eval()
if mlm_model:
snake_case_ = model.load_state_dict(a_ , strict=a_ )
assert missing_keys == ["mlm_score.decoder.bias"]
else:
model.load_state_dict(a_ )
# Define processor
snake_case_ = ViltImageProcessor(size=3_8_4 )
snake_case_ = BertTokenizer.from_pretrained("bert-base-uncased" )
snake_case_ = ViltProcessor(a_ , a_ )
# Forward pass on example inputs (image + text)
if nlvr_model:
snake_case_ = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg" , stream=a_ ).raw )
snake_case_ = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg" , stream=a_ ).raw )
snake_case_ = (
'''The left image contains twice the number of dogs as the right image, and at least two dogs in total are'''
''' standing.'''
)
snake_case_ = processor(a_ , a_ , return_tensors="pt" )
snake_case_ = processor(a_ , a_ , return_tensors="pt" )
snake_case_ = model(
input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , )
else:
snake_case_ = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg" , stream=a_ ).raw )
if mlm_model:
snake_case_ = '''a bunch of [MASK] laying on a [MASK].'''
else:
snake_case_ = '''How many cats are there?'''
snake_case_ = processor(a_ , a_ , return_tensors="pt" )
snake_case_ = model(**a_ )
# Verify outputs
if mlm_model:
snake_case_ = torch.Size([1, 1_1, 3_0_5_2_2] )
snake_case_ = torch.tensor([-12.5061, -12.5123, -12.5174] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3] , a_ , atol=1e-4 )
# verify masked token prediction equals "cats"
snake_case_ = outputs.logits[0, 4, :].argmax(-1 ).item()
assert tokenizer.decode([predicted_id] ) == "cats"
elif vqa_model:
snake_case_ = torch.Size([1, 3_1_2_9] )
snake_case_ = torch.tensor([-15.9495, -18.1472, -10.3041] )
assert torch.allclose(outputs.logits[0, :3] , a_ , atol=1e-4 )
assert outputs.logits.shape == expected_shape
# verify vqa prediction equals "2"
snake_case_ = outputs.logits.argmax(-1 ).item()
assert model.config.idalabel[predicted_idx] == "2"
elif nlvr_model:
snake_case_ = torch.Size([1, 2] )
snake_case_ = torch.tensor([-2.8721, 2.1291] )
assert torch.allclose(outputs.logits[0, :3] , a_ , atol=1e-4 )
assert outputs.logits.shape == expected_shape
Path(a_ ).mkdir(exist_ok=a_ )
print(f'Saving model and processor to {pytorch_dump_folder_path}' )
model.save_pretrained(a_ )
processor.save_pretrained(a_ )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt",
type=str,
help="URL of the checkpoint you\'d like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
_SCREAMING_SNAKE_CASE : Any = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
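# Illustrative sketch (hypothetical, not from the snippet above): the core
# trick inside read_in_q_k_v(), in isolation. Timm-style checkpoints store one
# fused (3 * hidden, hidden) qkv projection, which the HF layout splits into
# separate query/key/value weights. Toy hidden size; assumes torch is installed.
import torch

demo_hidden = 4
fused = torch.arange(3 * demo_hidden * demo_hidden, dtype=torch.float32).reshape(3 * demo_hidden, demo_hidden)

query_w = fused[:demo_hidden, :]                   # first third -> query
key_w = fused[demo_hidden : 2 * demo_hidden, :]    # middle third -> key
value_w = fused[-demo_hidden:, :]                  # last third -> value
assert torch.equal(torch.cat([query_w, key_w, value_w]), fused)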
| 400 |
"""simple docstring"""
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {
'''microsoft/xprophetnet-large-wiki100-cased''': (
'''https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json'''
),
}
class __magic_name__ ( PretrainedConfig ):
    model_type = "xlm-prophetnet"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "num_encoder_attention_heads",
    }
    def __init__( self , activation_dropout : Optional[float] = 0.1 , activation_function : Optional[Union[str, Callable]] = "gelu" , vocab_size : Optional[int] = 3_0_5_2_2 , hidden_size : Optional[int] = 1_0_2_4 , encoder_ffn_dim : Optional[int] = 4_0_9_6 , num_encoder_layers : Optional[int] = 1_2 , num_encoder_attention_heads : Optional[int] = 1_6 , decoder_ffn_dim : Optional[int] = 4_0_9_6 , num_decoder_layers : Optional[int] = 1_2 , num_decoder_attention_heads : Optional[int] = 1_6 , attention_dropout : Optional[float] = 0.1 , dropout : Optional[float] = 0.1 , max_position_embeddings : Optional[int] = 5_1_2 , init_std : Optional[float] = 0.02 , is_encoder_decoder : Optional[bool] = True , add_cross_attention : Optional[bool] = True , decoder_start_token_id : Optional[int] = 0 , ngram : Optional[int] = 2 , num_buckets : Optional[int] = 3_2 , relative_max_distance : Optional[int] = 1_2_8 , disable_ngram_loss : Optional[bool] = False , eps : Optional[float] = 0.0 , use_cache : Optional[bool] = True , pad_token_id : Optional[int] = 0 , bos_token_id : Optional[int] = 1 , eos_token_id : Optional[int] = 2 , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_ffn_dim = encoder_ffn_dim
        self.num_encoder_layers = num_encoder_layers
        self.num_encoder_attention_heads = num_encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_decoder_layers = num_decoder_layers
        self.num_decoder_attention_heads = num_decoder_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.init_std = init_std # Normal(0, this parameter)
        self.activation_function = activation_function
        # parameters for xlmprophetnet
        self.ngram = ngram
        self.num_buckets = num_buckets
        self.relative_max_distance = relative_max_distance
        self.disable_ngram_loss = disable_ngram_loss
        self.eps = eps
        # 3 Types of Dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.dropout = dropout
        self.use_cache = use_cache
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , add_cross_attention=add_cross_attention , decoder_start_token_id=decoder_start_token_id , **kwargs , )
@property
    def num_hidden_layers( self ):
'''simple docstring'''
return self.num_encoder_layers + self.num_decoder_layers
@num_hidden_layers.setter
    def num_hidden_layers( self , value ):
'''simple docstring'''
raise NotImplementedError(
'''This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and'''
''' `num_decoder_layers`.''' )
| 677 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_encodec': [
'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EncodecConfig',
],
'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encodec"] = [
'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
'EncodecModel',
'EncodecPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 263 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_bert''': ['''BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BertConfig''', '''BertOnnxConfig'''],
'''tokenization_bert''': ['''BasicTokenizer''', '''BertTokenizer''', '''WordpieceTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bert_fast"] = ['''BertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bert"] = [
'''BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BertForMaskedLM''',
'''BertForMultipleChoice''',
'''BertForNextSentencePrediction''',
'''BertForPreTraining''',
'''BertForQuestionAnswering''',
'''BertForSequenceClassification''',
'''BertForTokenClassification''',
'''BertLayer''',
'''BertLMHeadModel''',
'''BertModel''',
'''BertPreTrainedModel''',
'''load_tf_weights_in_bert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_bert"] = [
'''TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFBertEmbeddings''',
'''TFBertForMaskedLM''',
'''TFBertForMultipleChoice''',
'''TFBertForNextSentencePrediction''',
'''TFBertForPreTraining''',
'''TFBertForQuestionAnswering''',
'''TFBertForSequenceClassification''',
'''TFBertForTokenClassification''',
'''TFBertLMHeadModel''',
'''TFBertMainLayer''',
'''TFBertModel''',
'''TFBertPreTrainedModel''',
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bert_tf"] = ['''TFBertTokenizer''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_bert"] = [
'''FlaxBertForCausalLM''',
'''FlaxBertForMaskedLM''',
'''FlaxBertForMultipleChoice''',
'''FlaxBertForNextSentencePrediction''',
'''FlaxBertForPreTraining''',
'''FlaxBertForQuestionAnswering''',
'''FlaxBertForSequenceClassification''',
'''FlaxBertForTokenClassification''',
'''FlaxBertModel''',
'''FlaxBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 677 | 0 |
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case, __snake_case, __snake_case=True, __snake_case="pt" ) -> List[str]:
"""simple docstring"""
_UpperCamelCase = {'''add_prefix_space''': True} if isinstance(a_, a_ ) and not line.startswith(''' ''' ) else {}
_UpperCamelCase = padding_side
return tokenizer(
[line], max_length=a_, padding='''max_length''' if pad_to_max_length else None, truncation=a_, return_tensors=a_, add_special_tokens=a_, **a_, )
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case=None, ) -> Tuple:
"""simple docstring"""
_UpperCamelCase = input_ids.ne(a_ ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class _UpperCAmelCase( Dataset ):
def __init__( self , __a , __a , __a , __a , __a="train" , __a=None , __a=None , __a=None , __a="" , ) -> str:
'''simple docstring'''
super().__init__()
_UpperCamelCase = Path(snake_case__).joinpath(type_path + '''.source''')
_UpperCamelCase = Path(snake_case__).joinpath(type_path + '''.target''')
_UpperCamelCase = self.get_char_lens(self.src_file)
_UpperCamelCase = max_source_length
_UpperCamelCase = max_target_length
assert min(self.src_lens) > 0, F'''found empty line in {self.src_file}'''
_UpperCamelCase = tokenizer
_UpperCamelCase = prefix
if n_obs is not None:
_UpperCamelCase = self.src_lens[:n_obs]
_UpperCamelCase = src_lang
_UpperCamelCase = tgt_lang
def __len__( self) -> str:
'''simple docstring'''
return len(self.src_lens)
def __getitem__( self , __a) -> List[str]:
'''simple docstring'''
_UpperCamelCase = index + 1 # linecache starts at 1
_UpperCamelCase = self.prefix + linecache.getline(str(self.src_file) , snake_case__).rstrip('''\n''')
_UpperCamelCase = linecache.getline(str(self.tgt_file) , snake_case__).rstrip('''\n''')
assert source_line, F'''empty source line for index {index}'''
assert tgt_line, F'''empty tgt line for index {index}'''
# Need to add eos token manually for T5
if isinstance(self.tokenizer , snake_case__):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
_UpperCamelCase = (
self.tokenizer.question_encoder if isinstance(self.tokenizer , snake_case__) else self.tokenizer
)
_UpperCamelCase = self.tokenizer.generator if isinstance(self.tokenizer , snake_case__) else self.tokenizer
_UpperCamelCase = encode_line(snake_case__ , snake_case__ , self.max_source_length , '''right''')
_UpperCamelCase = encode_line(snake_case__ , snake_case__ , self.max_target_length , '''right''')
_UpperCamelCase = source_inputs['''input_ids'''].squeeze()
_UpperCamelCase = target_inputs['''input_ids'''].squeeze()
_UpperCamelCase = source_inputs['''attention_mask'''].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def UpperCAmelCase ( __a) -> Tuple:
'''simple docstring'''
return [len(snake_case__) for x in Path(snake_case__).open().readlines()]
def UpperCAmelCase ( self , __a) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = torch.stack([x['''input_ids'''] for x in batch])
_UpperCamelCase = torch.stack([x['''attention_mask'''] for x in batch])
_UpperCamelCase = torch.stack([x['''decoder_input_ids'''] for x in batch])
_UpperCamelCase = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer , snake_case__)
else self.tokenizer.pad_token_id
)
_UpperCamelCase = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer , snake_case__)
else self.tokenizer.pad_token_id
)
_UpperCamelCase = trim_batch(snake_case__ , snake_case__)
_UpperCamelCase = trim_batch(snake_case__ , snake_case__ , attention_mask=snake_case__)
_UpperCamelCase = {
'''input_ids''': source_ids,
'''attention_mask''': source_mask,
'''decoder_input_ids''': y,
}
return batch
_a = getLogger(__name__)
def lowerCamelCase__ ( __snake_case ) -> Tuple:
"""simple docstring"""
return list(itertools.chain.from_iterable(a_ ) )
def lowerCamelCase__ ( __snake_case ) -> None:
"""simple docstring"""
_UpperCamelCase = get_git_info()
save_json(a_, os.path.join(a_, '''git_log.json''' ) )
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case=4, **__snake_case ) -> str:
"""simple docstring"""
with open(a_, '''w''' ) as f:
json.dump(a_, a_, indent=a_, **a_ )
def lowerCamelCase__ ( __snake_case ) -> Union[str, Any]:
"""simple docstring"""
with open(a_ ) as f:
return json.load(a_ )
def lowerCamelCase__ ( ) -> List[str]:
"""simple docstring"""
_UpperCamelCase = git.Repo(search_parent_directories=a_ )
_UpperCamelCase = {
'''repo_id''': str(a_ ),
'''repo_sha''': str(repo.head.object.hexsha ),
'''repo_branch''': str(repo.active_branch ),
'''hostname''': str(socket.gethostname() ),
}
return repo_infos
def lowerCamelCase__ ( __snake_case, __snake_case ) -> List:
"""simple docstring"""
return list(map(a_, a_ ) )
def lowerCamelCase__ ( __snake_case, __snake_case ) -> Any:
"""simple docstring"""
with open(a_, '''wb''' ) as f:
return pickle.dump(a_, a_ )
def lowerCamelCase__ ( __snake_case ) -> List[str]:
"""simple docstring"""
def remove_articles(__snake_case ):
return re.sub(r'''\b(a|an|the)\b''', ''' ''', a_ )
def white_space_fix(__snake_case ):
return " ".join(text.split() )
def remove_punc(__snake_case ):
_UpperCamelCase = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(__snake_case ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(a_ ) ) ) )
def lowerCamelCase__ ( __snake_case, __snake_case ) -> List[str]:
"""simple docstring"""
_UpperCamelCase = normalize_answer(a_ ).split()
_UpperCamelCase = normalize_answer(a_ ).split()
_UpperCamelCase = Counter(a_ ) & Counter(a_ )
_UpperCamelCase = sum(common.values() )
if num_same == 0:
return 0
_UpperCamelCase = 1.0 * num_same / len(a_ )
_UpperCamelCase = 1.0 * num_same / len(a_ )
_UpperCamelCase = (2 * precision * recall) / (precision + recall)
return fa
def lowerCamelCase__ ( __snake_case, __snake_case ) -> List[Any]:
"""simple docstring"""
return normalize_answer(a_ ) == normalize_answer(a_ )
def lowerCamelCase__ ( __snake_case, __snake_case ) -> Dict:
"""simple docstring"""
assert len(a_ ) == len(a_ )
_UpperCamelCase = 0
for hypo, pred in zip(a_, a_ ):
em += exact_match_score(a_, a_ )
if len(a_ ) > 0:
em /= len(a_ )
return {"em": em}
def lowerCamelCase__ ( __snake_case ) -> Optional[Any]:
"""simple docstring"""
return model_prefix.startswith('''rag''' )
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case ) -> Any:
"""simple docstring"""
_UpperCamelCase = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
_UpperCamelCase = '''dropout_rate'''
for p in extra_params:
if getattr(a_, a_, a_ ):
if not hasattr(a_, a_ ) and not hasattr(a_, equivalent_param[p] ):
logger.info('''config doesn\'t have a `{}` attribute'''.format(a_ ) )
delattr(a_, a_ )
continue
_UpperCamelCase = p if hasattr(a_, a_ ) else equivalent_param[p]
setattr(a_, a_, getattr(a_, a_ ) )
delattr(a_, a_ )
return hparams, config
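# Illustrative sketch (hypothetical, not from the snippet above): the
# token-overlap F1 computed near the end of this snippet, restated standalone
# so the arithmetic is easy to follow (fresh names, since the snippet's own
# function names are mangled).
from collections import Counter

def token_f1(prediction, reference):
    pred_tokens, ref_tokens = prediction.lower().split(), reference.lower().split()
    common = Counter(pred_tokens) & Counter(ref_tokens)  # multiset intersection
    num_same = sum(common.values())
    if num_same == 0:
        return 0.0
    precision = num_same / len(pred_tokens)
    recall = num_same / len(ref_tokens)
    return 2 * precision * recall / (precision + recall)

# "new york city" vs "york city": 2 shared tokens -> P = 2/3, R = 1, F1 = 0.8
print(round(token_f1("new york city", "york city"), 3))  # 0.8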
| 19 |
"""simple docstring"""
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __magic_name__ ( ProcessorMixin ):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"
    def __init__( self , image_processor , tokenizer ):
        '''simple docstring'''
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
    def __call__( self , images : ImageInput = None , text : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , add_special_tokens : bool = True , padding : Union[bool, str, PaddingStrategy] = False , truncation : Union[bool, str, TruncationStrategy] = None , max_length : Optional[int] = None , stride : int = 0 , pad_to_multiple_of : Optional[int] = None , return_attention_mask : Optional[bool] = None , return_overflowing_tokens : bool = False , return_special_tokens_mask : bool = False , return_offsets_mapping : bool = False , return_token_type_ids : bool = False , return_length : bool = False , verbose : bool = True , return_tensors : Optional[Union[str, TensorType]] = None , **kwargs , ):
        '''simple docstring'''
        if images is None and text is None:
            raise ValueError('''You have to specify either images or text.''' )
        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
            return text_encoding
        # add pixel_values
        encoding_image_processor = self.image_processor(images , return_tensors=return_tensors )
        if text is not None:
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        else:
            text_encoding = None
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding )
        return encoding_image_processor
    def batch_decode( self , *args , **kwargs ):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        '''simple docstring'''
        return self.tokenizer.decode(*args , **kwargs )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names( self ):
        '''simple docstring'''
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
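# Illustrative sketch (hypothetical usage, not from the snippet above): typical
# use of a combined processor like the one above — images go through the image
# processor, text through the tokenizer, and both land in one encoding. Assumes
# `transformers` and `Pillow` are installed; "Salesforce/blip2-opt-2.7b" is one
# checkpoint that ships a processor of this shape, and loading it downloads files.
from PIL import Image
from transformers import AutoProcessor

demo_processor = AutoProcessor.from_pretrained("Salesforce/blip2-opt-2.7b")
demo_image = Image.new("RGB", (224, 224), color="white")  # stand-in image
inputs = demo_processor(images=demo_image, text="a photo of", return_tensors="pt")
print(sorted(inputs.keys()))  # e.g. ['attention_mask', 'input_ids', 'pixel_values']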
| 677 | 0 |
"""simple docstring"""
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
__A = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
"text-classification",
"language-modeling",
"summarization",
"token-classification",
"question-answering",
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
    import run_t5_mlm_flax
logging.basicConfig(level=logging.DEBUG)
__A = logging.getLogger()
def SCREAMING_SNAKE_CASE__ ( ) -> List[Any]:
lowercase__: List[Any] = argparse.ArgumentParser()
parser.add_argument('''-f''' )
lowercase__: int = parser.parse_args()
return args.f
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase="eval" ) -> Optional[Any]:
lowercase__: Optional[int] = os.path.join(a_ , F"""{split}_results.json""" )
if os.path.exists(a_ ):
with open(a_ , '''r''' ) as f:
return json.load(a_ )
raise ValueError(F"""can't find {path}""" )
__A = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class UpperCAmelCase (TestCasePlus ):
"""simple docstring"""
def _snake_case ( self ):
lowercase__: Union[str, Any] = self.get_auto_remove_tmp_dir()
lowercase__: Dict = F"""
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
with patch.object(snake_case__ , '''argv''' , snake_case__ ):
run_flax_glue.main()
lowercase__: Any = get_results(snake_case__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
@slow
def _snake_case ( self ):
lowercase__: Optional[int] = self.get_auto_remove_tmp_dir()
lowercase__: Any = F"""
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(snake_case__ , '''argv''' , snake_case__ ):
run_clm_flax.main()
lowercase__: Optional[Any] = get_results(snake_case__ )
self.assertLess(result['''eval_perplexity'''] , 100 )
@slow
def _snake_case ( self ):
lowercase__: str = self.get_auto_remove_tmp_dir()
lowercase__: Optional[int] = F"""
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
""".split()
with patch.object(snake_case__ , '''argv''' , snake_case__ ):
run_summarization_flax.main()
lowercase__: Tuple = get_results(snake_case__ , split='''test''' )
self.assertGreaterEqual(result['''test_rouge1'''] , 10 )
self.assertGreaterEqual(result['''test_rouge2'''] , 2 )
self.assertGreaterEqual(result['''test_rougeL'''] , 7 )
self.assertGreaterEqual(result['''test_rougeLsum'''] , 7 )
@slow
def _snake_case ( self ):
lowercase__: Optional[int] = self.get_auto_remove_tmp_dir()
lowercase__: Any = F"""
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
""".split()
with patch.object(snake_case__ , '''argv''' , snake_case__ ):
run_mlm_flax.main()
lowercase__: List[Any] = get_results(snake_case__ )
self.assertLess(result['''eval_perplexity'''] , 42 )
@slow
def _snake_case ( self ):
lowercase__: List[str] = self.get_auto_remove_tmp_dir()
lowercase__: Dict = F"""
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(snake_case__ , '''argv''' , snake_case__ ):
            run_t5_mlm_flax.main()
lowercase__: Tuple = get_results(snake_case__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.42 )
@slow
def _snake_case ( self ):
lowercase__: Any = 7 if get_gpu_count() > 1 else 2
lowercase__: Dict = self.get_auto_remove_tmp_dir()
lowercase__: int = F"""
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
""".split()
with patch.object(snake_case__ , '''argv''' , snake_case__ ):
run_flax_ner.main()
lowercase__: Tuple = get_results(snake_case__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertGreaterEqual(result['''eval_f1'''] , 0.3 )
@slow
def _snake_case ( self ):
lowercase__: int = self.get_auto_remove_tmp_dir()
lowercase__: Optional[Any] = F"""
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
""".split()
with patch.object(snake_case__ , '''argv''' , snake_case__ ):
run_qa.main()
lowercase__: List[str] = get_results(snake_case__ )
self.assertGreaterEqual(result['''eval_f1'''] , 30 )
self.assertGreaterEqual(result['''eval_exact'''] , 30 )
| 586 |
"""simple docstring"""
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class __magic_name__ ( TestCasePlus ):
@require_torch
def __snake_case ( self : Dict ):
'''simple docstring'''
lowercase :Optional[Any] = '''
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
'''
lowercase :Any = '''
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
'''
lowercase :Tuple = '''
import socket
def offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")
socket.socket = offline_socket
'''
# Force fetching the files so that we can use the cache
lowercase :str = '''hf-internal-testing/tiny-random-bert'''
BertConfig.from_pretrained(snake_case__ )
BertModel.from_pretrained(snake_case__ )
BertTokenizer.from_pretrained(snake_case__ )
pipeline(task='''fill-mask''' , model=snake_case__ )
# baseline - just load from_pretrained with normal network
lowercase :Union[str, Any] = [sys.executable, '''-c''', '''\n'''.join([load, run, mock] )]
# should succeed
lowercase :Any = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
lowercase :List[Any] = '''1'''
lowercase :List[str] = subprocess.run(snake_case__ , env=snake_case__ , check=snake_case__ , capture_output=snake_case__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
def __snake_case ( self : Tuple ):
'''simple docstring'''
lowercase :List[str] = '''
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
'''
lowercase :Dict = '''
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
'''
lowercase :List[Any] = '''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")
socket.socket = offline_socket
'''
# Force fetching the files so that we can use the cache
lowercase :str = '''hf-internal-testing/tiny-random-bert'''
BertConfig.from_pretrained(snake_case__ )
BertModel.from_pretrained(snake_case__ )
BertTokenizer.from_pretrained(snake_case__ )
pipeline(task='''fill-mask''' , model=snake_case__ )
# baseline - just load from_pretrained with normal network
lowercase :List[str] = [sys.executable, '''-c''', '''\n'''.join([load, run, mock] )]
# should succeed
lowercase :str = self.get_env()
lowercase :str = subprocess.run(snake_case__ , env=snake_case__ , check=snake_case__ , capture_output=snake_case__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
def __snake_case ( self : int ):
'''simple docstring'''
lowercase :str = '''
from transformers import BertConfig, BertModel, BertTokenizer
'''
lowercase :Union[str, Any] = '''
mname = "hf-internal-testing/tiny-random-bert-sharded"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print("success")
'''
lowercase :Optional[int] = '''
import socket
def offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")
socket.socket = offline_socket
'''
# baseline - just load from_pretrained with normal network
lowercase :Optional[Any] = [sys.executable, '''-c''', '''\n'''.join([load, run] )]
# should succeed
lowercase :Union[str, Any] = self.get_env()
lowercase :str = subprocess.run(snake_case__ , env=snake_case__ , check=snake_case__ , capture_output=snake_case__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
# next emulate no network
lowercase :Tuple = [sys.executable, '''-c''', '''\n'''.join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
lowercase :Any = '''1'''
lowercase :Optional[Any] = subprocess.run(snake_case__ , env=snake_case__ , check=snake_case__ , capture_output=snake_case__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
def __snake_case ( self : Optional[int] ):
'''simple docstring'''
lowercase :Dict = '''
from transformers import pipeline
'''
lowercase :Optional[Any] = '''
mname = "hf-internal-testing/tiny-random-bert"
pipe = pipeline(model=mname)
'''
lowercase :Dict = '''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")
socket.socket = offline_socket
'''
lowercase :Tuple = self.get_env()
lowercase :Optional[Any] = '''1'''
lowercase :Optional[int] = [sys.executable, '''-c''', '''\n'''.join([load, mock, run] )]
lowercase :str = subprocess.run(snake_case__ , env=snake_case__ , check=snake_case__ , capture_output=snake_case__ )
self.assertEqual(result.returncode , 1 , result.stderr )
self.assertIn(
'''You cannot infer task automatically within `pipeline` when using offline mode''' , result.stderr.decode().replace('''\n''' , '''''' ) , )
@require_torch
def __snake_case ( self : Optional[Any] ):
'''simple docstring'''
lowercase :List[Any] = '''
from transformers import AutoModel
'''
lowercase :Union[str, Any] = '''
mname = "hf-internal-testing/test_dynamic_model"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print("success")
'''
# baseline - just load from_pretrained with normal network
lowercase :Union[str, Any] = [sys.executable, '''-c''', '''\n'''.join([load, run] )]
# should succeed
lowercase :List[str] = self.get_env()
lowercase :Optional[int] = subprocess.run(snake_case__ , env=snake_case__ , check=snake_case__ , capture_output=snake_case__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
lowercase :List[Any] = '''1'''
lowercase :Tuple = subprocess.run(snake_case__ , env=snake_case__ , check=snake_case__ , capture_output=snake_case__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
| 677 | 0 |
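# A minimal, self-contained sketch of the socket-mocking technique used in the
# offline-mode tests above: replacing `socket.socket` makes any new connection
# fail fast, so a test can assert that a code path works from local cache alone.
# `fetch_greeting` is a hypothetical helper, invented purely for illustration.
import socket

def offline_socket(*args, **kwargs):
    raise socket.error("Offline mode is enabled")

def fetch_greeting():
    try:
        socket.create_connection(("example.com", 80), timeout=1)
        return "online"
    except OSError:
        return "offline"

socket.socket = offline_socket  # monkey-patch: every new socket now raises
print(fetch_greeting())  # prints "offline"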
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
logger = logging.get_logger(__name__)
def extract_warnings_from_single_artifact( artifact_path , targets ):
    selected_warnings = set()
    buffer = []
    def parse_line(fp ):
        for line in fp:
            if isinstance(line , bytes ):
                line = line.decode("UTF-8" )
if "warnings summary (final)" in line:
continue
# This means we are outside the body of a warning
elif not line.startswith(" " ):
# process a single warning and move it to `selected_warnings`.
                if len(buffer ) > 0:
                    warning = '''\n'''.join(buffer )
                    # Only keep the warnings specified in `targets`
                    if any(f""": {x}: """ in warning for x in targets ):
                        selected_warnings.add(warning )
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line )
    if from_gh:
        for filename in os.listdir(artifact_path ):
            file_path = os.path.join(artifact_path , filename )
            if not os.path.isdir(file_path ):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path ) as fp:
                    parse_line(fp )
else:
        try:
            with zipfile.ZipFile(artifact_path ) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename ):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename ) as fp:
                            parse_line(fp )
except Exception:
logger.warning(
f"""{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.""" )
return selected_warnings
def extract_warnings( artifact_dir , targets ):
    selected_warnings = set()
    paths = [os.path.join(artifact_dir , p ) for p in os.listdir(artifact_dir ) if (p.endswith(".zip" ) or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p , targets ) )
    return selected_warnings
if __name__ == "__main__":
    def list_str( values ):
        return values.split("," )
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
parser.add_argument(
"""--output_dir""",
type=str,
required=True,
help="""Where to store the downloaded artifacts and other result files.""",
)
parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""")
# optional parameters
parser.add_argument(
"""--targets""",
default="""DeprecationWarning,UserWarning,FutureWarning""",
type=list_str,
help="""Comma-separated list of target warning(s) which we want to extract.""",
)
parser.add_argument(
"""--from_gh""",
action="""store_true""",
help="""If running from a GitHub action workflow and collecting warnings from its artifacts.""",
)
    args = parser.parse_args()
    from_gh = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print("""=""" * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
with open(os.path.join(args.output_dir, """selected_warnings.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
| 204 |
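# A small standalone sketch of the filtering idea used in the script above:
# pytest renders each warning as "path:line: <Category>: message", so checking
# for ": <Category>: " inside the text selects the wanted categories. The
# sample lines below are invented for illustration.
targets = ["DeprecationWarning", "FutureWarning"]
lines = [
    "tests/test_x.py:12: DeprecationWarning: foo() is deprecated",
    "tests/test_y.py:34: UserWarning: something minor",
]
selected = {line for line in lines if any(f": {t}: " in line for t in targets)}
print(sorted(selected))  # keeps only the DeprecationWarning entry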
"""simple docstring"""
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase = logging.get_logger()
@dataclass
class Tracker :
    module : nn.Module
    traced : List[nn.Module] = field(default_factory=list )
    handles : list = field(default_factory=list )
    def _forward_hook( self : List[str] , m : List[str] , inputs : Tensor , outputs : Tensor ):
        '''simple docstring'''
        has_not_submodules = len(list(m.modules() ) ) == 1 or isinstance(m , nn.Conv2d ) or isinstance(m , nn.BatchNorm2d )
        if has_not_submodules:
            self.traced.append(m )
def __call__( self : int , snake_case__ : Tensor ):
'''simple docstring'''
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(snake_case__ )
[x.remove() for x in self.handles]
return self
@property
    def parametrized( self : int ):
        '''simple docstring'''
        return list(filter(lambda x : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class ModuleTransfer :
    src : nn.Module
    dest : nn.Module
    verbose : int = 0
    src_skip : List = field(default_factory=list )
    dest_skip : List = field(default_factory=list )
    def __call__( self : Dict , x : Tensor ):
        '''simple docstring'''
        dest_traced = Tracker(self.dest )(x ).parametrized
        src_traced = Tracker(self.src )(x ).parametrized
        src_traced = list(filter(lambda m : type(m ) not in self.src_skip , src_traced ) )
        dest_traced = list(filter(lambda m : type(m ) not in self.dest_skip , dest_traced ) )
        if len(dest_traced ) != len(src_traced ):
            raise Exception(
                f"""Numbers of operations are different. Source module has {len(src_traced )} operations while"""
                f""" destination module has {len(dest_traced )}.""" )
        for dest_m, src_m in zip(dest_traced , src_traced ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(f"""Transfered from={src_m} to={dest_m}""" )
def convert_weight_and_push(name :str , config :ResNetConfig , save_directory :Path , push_to_hub :bool = True) -> Optional[Any]:
    print(F"""Converting {name}...""")
    with torch.no_grad():
        from_model = timm.create_model(name , pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model , dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)
    assert torch.allclose(from_model(x) , our_model(x).logits), "The model logits don't match the original one."
    checkpoint_name = F"""resnet{'-'.join(name.split('resnet'))}"""
    print(checkpoint_name)
    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name , commit_message='''Add model''' , use_temp_dir=True , )
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''')
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name , commit_message='''Add image processor''' , use_temp_dir=True , )
        print(F"""Pushed {checkpoint_name}""")
def convert_weights_and_push(save_directory :Path , model_name :str = None , push_to_hub :bool = True) -> int:
    filename = '''imagenet-1k-id2label.json'''
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = '''huggingface/label-files'''
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''') , '''r'''))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(ResNetConfig , num_labels=num_labels , id2label=id2label , label2id=label2id)
    names_to_config = {
'''resnet18''': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type='''basic'''),
'''resnet26''': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck'''),
'''resnet34''': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type='''basic'''),
'''resnet50''': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck'''),
'''resnet101''': ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck'''),
'''resnet152''': ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck'''),
}
    if model_name:
        convert_weight_and_push(model_name , names_to_config[model_name] , save_directory , push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name , config , save_directory , push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help=(
'''The name of the model you wish to convert, it must be one of the supported resnet* architecture,'''
''' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=Path,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
default=True,
type=bool,
required=False,
help='''If True, push model and image processor to the hub.''',
)
    args = parser.parse_args()
    pytorch_dump_folder_path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 677 | 0 |
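# A minimal sketch of the forward-hook tracing idea behind the Tracker class
# above: registering a hook on every leaf module records modules in the order
# they actually execute, which is what makes positional weight transfer
# possible. The toy network below is arbitrary.
import torch
import torch.nn as nn

model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.ReLU())
traced = []
handles = [
    m.register_forward_hook(lambda mod, inp, out: traced.append(mod))
    for m in model.modules()
    if len(list(m.children())) == 0  # leaf modules only
]
model(torch.randn(1, 3, 8, 8))
[h.remove() for h in handles]
print([type(m).__name__ for m in traced])  # ['Conv2d', 'BatchNorm2d', 'ReLU']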
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
logger = logging.getLogger(__name__)
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--onnx_model_path",
default=None,
type=str,
required=True,
help="Path to ONNX model: ",
)
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model checkpoints and predictions will be written.",
)
# Other parameters
parser.add_argument(
"--tokenizer_name",
default="",
type=str,
required=True,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--version_2_with_negative",
action="store_true",
help="If true, the SQuAD examples contain some that do not have an answer.",
)
parser.add_argument(
"--null_score_diff_threshold",
type=float,
default=0.0,
help="If null_score - best_non_null is greater than the threshold predict null.",
)
parser.add_argument(
"--max_seq_length",
default=384,
type=int,
help=(
"The maximum total input sequence length after WordPiece tokenization. Sequences "
"longer than this will be truncated, and sequences shorter than this will be padded."
),
)
parser.add_argument(
"--doc_stride",
default=128,
type=int,
help="When splitting up a long document into chunks, how much stride to take between chunks.",
)
parser.add_argument("--per_device_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.")
parser.add_argument(
"--n_best_size",
default=20,
type=int,
help="The total number of n-best predictions to generate in the nbest_predictions.json output file.",
)
parser.add_argument(
"--max_answer_length",
default=30,
type=int,
help=(
"The maximum length of an answer that can be generated. This is needed because the start "
"and end predictions are not conditioned on one another."
),
)
parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
parser.add_argument(
"--dataset_name",
type=str,
default=None,
required=True,
help="The name of the dataset to use (via the datasets library).",
)
parser.add_argument(
"--dataset_config_name",
type=str,
default=None,
help="The configuration name of the dataset to use (via the datasets library).",
)
parser.add_argument(
"--preprocessing_num_workers", type=int, default=4, help="A csv or a json file containing the training data."
)
parser.add_argument("--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets")
parser.add_argument(
"--fp16",
action="store_true",
help="Whether to use 16-bit (mixed) precision instead of 32-bit",
)
parser.add_argument(
"--int8",
action="store_true",
help="Whether to use INT8",
)
args = parser.parse_args()
if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name."
)
logger.info("Training/evaluation parameters %s", args)
args.eval_batch_size = args.per_device_eval_batch_size
INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
STRICT_TYPES = True
engine_name = "temp_engine/bert-fp32.engine"
if args.fp16:
    engine_name = "temp_engine/bert-fp16.engine"
if args.int8:
    engine_name = "temp_engine/bert-int8.engine"
# import ONNX file
if not os.path.exists("temp_engine"):
os.makedirs("temp_engine")
EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, "rb") as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]
    with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
        profile = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, "wb") as f:
f.write(engine.serialize())
def model_infer(inputs , context , d_inputs , h_output0 , h_output1 , d_output0 , d_output1 , stream ) -> str:
    input_ids = np.asarray(inputs["""input_ids"""] , dtype=np.int32 )
    attention_mask = np.asarray(inputs["""attention_mask"""] , dtype=np.int32 )
    token_type_ids = np.asarray(inputs["""token_type_ids"""] , dtype=np.int32 )
    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , stream )
    cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , stream )
    cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , stream )
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp ) for d_inp in d_inputs] + [int(d_output0 ), int(d_output1 )] , stream_handle=stream.handle )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0 , d_output0 , stream )
    cuda.memcpy_dtoh_async(h_output1 , d_output1 , stream )
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    # print(outputs)
    return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError("Evaluation requires a dataset name")
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.
UpperCamelCase = raw_datasets["validation"].column_names
UpperCamelCase = "question" if "question" in column_names else column_names[0]
UpperCamelCase = "context" if "context" in column_names else column_names[1]
UpperCamelCase = "answers" if "answers" in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
UpperCamelCase = tokenizer.padding_side == "right"
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f'''The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the'''
f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.'''
)
max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features(examples ) -> List[Any]:
    # Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lots of space). So we remove that
    # left whitespace
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]
    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possible giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation="""only_second""" if pad_on_right else """only_first""" , max_length=max_seq_length , stride=args.doc_stride , return_overflowing_tokens=True , return_offsets_mapping=True , padding="""max_length""" , )
    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop("""overflow_to_sample_mapping""" )
    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples["example_id"] = []
    for i in range(len(tokenized_examples["""input_ids"""] ) ):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i )
        context_index = 1 if pad_on_right else 0
        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["example_id"].append(examples["""id"""][sample_index] )
        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples["""offset_mapping"""][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples["""offset_mapping"""][i] )
        ]
    return tokenized_examples
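# For intuition, a standalone sketch (kept in comments so it does not run as part
# of this script) of the stride/overflow behavior `prepare_validation_features`
# relies on: with `return_overflowing_tokens=True` a long context is split into
# several overlapping features, and `overflow_to_sample_mapping` points each
# feature back to its source example. The tokenizer name is just an assumption
# for illustration.
#
#   from transformers import AutoTokenizer
#   tok = AutoTokenizer.from_pretrained("bert-base-uncased")
#   enc = tok(
#       ["What is X?"], ["a very long context " * 200],
#       truncation="only_second", max_length=128, stride=32,
#       return_overflowing_tokens=True, return_offsets_mapping=True,
#       padding="max_length",
#   )
#   print(len(enc["input_ids"]))              # several features from one example
#   print(enc["overflow_to_sample_mapping"])  # e.g. [0, 0, 0, ...]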
UpperCamelCase = raw_datasets["validation"]
# Validation Feature Creation
UpperCamelCase = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc="Running tokenizer on validation dataset",
)
UpperCamelCase = default_data_collator
UpperCamelCase = eval_dataset.remove_columns(["example_id", "offset_mapping"])
UpperCamelCase = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def post_processing_function(examples , features , predictions , stage="eval" ) -> int:
    # Post-processing: we match the start logits and end logits to answers in the original context.
    predictions = postprocess_qa_predictions(
        examples=examples , features=features , predictions=predictions , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=stage , )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {'''id''': k, '''prediction_text''': v, '''no_answer_probability''': 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{'''id''': k, '''prediction_text''': v} for k, v in predictions.items()]
    references = [{'''id''': ex['''id'''], '''answers''': ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions , label_ids=references )
metric = load_metric("squad_v2" if args.version_2_with_negative else "squad")
# Evaluation!
logger.info("Loading ONNX model %s for evaluation", args.onnx_model_path)
with open(engine_name, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
# setup for TRT inferrence
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
    def binding_nbytes(binding ) -> Union[str, Any]:
        return trt.volume(engine.get_binding_shape(binding ) ) * engine.get_binding_dtype(binding ).itemsize
# Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
    # Allocate output buffer
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)
    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()
# Evaluation
logger.info("***** Running Evaluation *****")
logger.info(f''' Num examples = {len(eval_dataset)}''')
logger.info(f''' Batch size = {args.per_device_eval_batch_size}''')
    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()
    all_preds = None
for step, batch in enumerate(eval_dataloader):
        outputs , infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1
        start_logits , end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)
        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))
    evalTime = timeit.default_timer() - start_time
logger.info(" Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info("Average Inference Time = {:.3f} ms".format(total_time * 1_000 / niter))
logger.info("Total Inference Time = {:.3f} ms".format(total_time * 1_000))
logger.info("Total Number of Inference = %d", niter)
    prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
    eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
    logger.info(f'''Evaluation metrics: {eval_metric}''')
| 45 |
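# A standalone numpy sketch of the pad-then-concatenate accumulation used in the
# evaluation loop above: batches can have different sequence lengths, so each
# logits array is padded with -100 up to a common width before stacking. This
# mirrors what `accelerator.pad_across_processes` + `nested_concat` do; it is an
# illustration, not the library code.
import numpy as np

def pad_concat(a, b, pad_index=-100):
    width = max(a.shape[1], b.shape[1])
    def pad(x):
        out = np.full((x.shape[0], width), pad_index, dtype=x.dtype)
        out[:, : x.shape[1]] = x
        return out
    return np.concatenate([pad(a), pad(b)], axis=0)

batch1 = np.ones((2, 5))  # 2 examples, sequence length 5
batch2 = np.ones((2, 7))  # 2 examples, sequence length 7
print(pad_concat(batch1, batch2).shape)  # (4, 7)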
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
    all_model_classes = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": TFMobileBertModel,
"fill-mask": TFMobileBertForMaskedLM,
"question-answering": TFMobileBertForQuestionAnswering,
"text-classification": TFMobileBertForSequenceClassification,
"token-classification": TFMobileBertForTokenClassification,
"zero-shot": TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def _prepare_for_class( self : Optional[Any] , inputs_dict : Optional[Any] , model_class : Any , return_labels : int=False ):
        '''simple docstring'''
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING ):
                inputs_dict['''labels'''] = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
return inputs_dict
class __magic_name__ ( __UpperCAmelCase ):
def __init__( self : Any , snake_case__ : Dict , snake_case__ : Dict=1_3 , snake_case__ : Tuple=7 , snake_case__ : Optional[Any]=True , snake_case__ : Union[str, Any]=True , snake_case__ : str=True , snake_case__ : Optional[Any]=True , snake_case__ : Any=9_9 , snake_case__ : Optional[Any]=3_2 , snake_case__ : Optional[Any]=3_2 , snake_case__ : Any=2 , snake_case__ : Optional[int]=4 , snake_case__ : List[Any]=3_7 , snake_case__ : Optional[int]="gelu" , snake_case__ : List[Any]=0.1 , snake_case__ : str=0.1 , snake_case__ : List[Any]=5_1_2 , snake_case__ : List[str]=1_6 , snake_case__ : Union[str, Any]=2 , snake_case__ : Optional[Any]=0.02 , snake_case__ : Optional[Any]=3 , snake_case__ : Dict=4 , snake_case__ : int=None , ):
'''simple docstring'''
lowercase :Tuple = parent
lowercase :Tuple = batch_size
lowercase :Optional[Any] = seq_length
lowercase :Optional[Any] = is_training
lowercase :Optional[Any] = use_input_mask
lowercase :List[Any] = use_token_type_ids
lowercase :str = use_labels
lowercase :List[str] = vocab_size
lowercase :str = hidden_size
lowercase :Optional[int] = num_hidden_layers
lowercase :Dict = num_attention_heads
lowercase :Any = intermediate_size
lowercase :List[str] = hidden_act
lowercase :Optional[Any] = hidden_dropout_prob
lowercase :List[Any] = attention_probs_dropout_prob
lowercase :List[Any] = max_position_embeddings
lowercase :List[Any] = type_vocab_size
lowercase :Union[str, Any] = type_sequence_label_size
lowercase :Union[str, Any] = initializer_range
lowercase :Any = num_labels
lowercase :int = num_choices
lowercase :Dict = scope
lowercase :Dict = embedding_size
    def prepare_config_and_inputs( self : Tuple ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = MobileBertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_mobilebert_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = TFMobileBertModel(config=config )
        inputs = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        result = model(inputs )
        inputs = [input_ids, input_mask]
        result = model(inputs )
        result = model(input_ids )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def create_and_check_mobilebert_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = TFMobileBertForMaskedLM(config=config )
        inputs = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_mobilebert_for_next_sequence_prediction( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = TFMobileBertForNextSentencePrediction(config=config )
        inputs = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
    def create_and_check_mobilebert_for_pretraining( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = TFMobileBertForPreTraining(config=config )
        inputs = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        result = model(inputs )
        self.parent.assertEqual(
            result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
    def create_and_check_mobilebert_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = TFMobileBertForSequenceClassification(config=config )
        inputs = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_mobilebert_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        config.num_choices = self.num_choices
        model = TFMobileBertForMultipleChoice(config=config )
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids , 1 ) , (1, self.num_choices, 1) )
        inputs = {
            '''input_ids''': multiple_choice_inputs_ids,
            '''attention_mask''': multiple_choice_input_mask,
            '''token_type_ids''': multiple_choice_token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def create_and_check_mobilebert_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = TFMobileBertForTokenClassification(config=config )
        inputs = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_mobilebert_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = TFMobileBertForQuestionAnswering(config=config )
        inputs = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        result = model(inputs )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __snake_case ( self : List[Any] ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config ,
            input_ids ,
            token_type_ids ,
            input_mask ,
            sequence_labels ,
            token_labels ,
            choice_labels ,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
    def setUp( self : Optional[Any] ):
        '''simple docstring'''
        self.model_tester = TFMobileBertModelTest.TFMobileBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MobileBertConfig , hidden_size=3_7 )
def __snake_case ( self : List[Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __snake_case ( self : Union[str, Any] ):
'''simple docstring'''
lowercase :List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*snake_case__ )
def __snake_case ( self : Any ):
'''simple docstring'''
lowercase :List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*snake_case__ )
def __snake_case ( self : Optional[Any] ):
'''simple docstring'''
lowercase :str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*snake_case__ )
def __snake_case ( self : Union[str, Any] ):
'''simple docstring'''
lowercase :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*snake_case__ )
def __snake_case ( self : Optional[int] ):
'''simple docstring'''
lowercase :int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*snake_case__ )
def __snake_case ( self : List[str] ):
'''simple docstring'''
lowercase :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*snake_case__ )
def __snake_case ( self : List[str] ):
'''simple docstring'''
lowercase :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*snake_case__ )
def __snake_case ( self : Dict ):
'''simple docstring'''
lowercase :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*snake_case__ )
@slow
def __snake_case ( self : int ):
'''simple docstring'''
for model_name in ["google/mobilebert-uncased"]:
lowercase :List[str] = TFMobileBertModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
@require_tf
class __magic_name__ ( unittest.TestCase ):
@slow
def __snake_case ( self : Tuple ):
'''simple docstring'''
        model = TFMobileBertForPreTraining.from_pretrained('''google/mobilebert-uncased''' )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        expected_shape = [1, 6, 3_0_5_2_2]
        self.assertEqual(output.shape , expected_shape )
        expected_slice = tf.constant(
[
[
[-4.5_91_95_47, -9.24_82_95, -9.64_52_56],
[-6.7_30_61_75, -6.44_02_84, -6.6_05_28_37],
[-7.2_74_35_06, -6.7_84_79_15, -6.02_46_73],
]
] )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1e-4 )
| 677 | 0 |
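# A minimal sketch of the integration-check pattern used in the slow test above:
# run a model on a fixed input and compare a small output slice against
# hard-coded reference values. The toy layer and numbers here are invented
# placeholders, not real MobileBERT logits.
import tensorflow as tf

layer = tf.keras.layers.Dense(4, kernel_initializer="ones", use_bias=False)
output = layer(tf.ones((1, 6, 4)))  # every entry sums four ones -> 4.0
expected_slice = tf.constant([[[4.0, 4.0, 4.0], [4.0, 4.0, 4.0], [4.0, 4.0, 4.0]]])
tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
print("slice check passed")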
"""simple docstring"""
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ['XLA_PYTHON_CLIENT_ALLOCATOR'] = 'platform'
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def UpperCAmelCase ( _lowercase : Any , _lowercase : Tuple , _lowercase : Dict=None , _lowercase : Union[str, Any]=None , _lowercase : Optional[int]=None , _lowercase : Dict=None , _lowercase : List[str]=None , _lowercase : Dict=None , ) -> Dict:
"""simple docstring"""
if attention_mask is None:
lowerCAmelCase_ = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
lowerCAmelCase_ = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
lowerCAmelCase_ = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
lowerCAmelCase_ = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
lowerCAmelCase_ = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
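# A quick standalone illustration (as comments, so it stays out of the module's
# import-time execution) of the `np.where` mask construction above: positions
# equal to the pad id get mask 0, real tokens get mask 1.
#
#   import numpy as np
#   pad_token_id = 1  # the pad id the tester below configures
#   ids = np.array([[5, 7, 9, 1, 1]])
#   print(np.where(ids != pad_token_id, 1, 0))  # [[1 1 1 0 0]]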
class __a :
def __init__( self , UpperCAmelCase , UpperCAmelCase=13 , UpperCAmelCase=7 , UpperCAmelCase=True , UpperCAmelCase=False , UpperCAmelCase=99 , UpperCAmelCase=16 , UpperCAmelCase=2 , UpperCAmelCase=4 , UpperCAmelCase=4 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=32 , UpperCAmelCase=2 , UpperCAmelCase=1 , UpperCAmelCase=0 , UpperCAmelCase=0.0_2 , ):
'''simple docstring'''
lowerCAmelCase_ = parent
lowerCAmelCase_ = batch_size
lowerCAmelCase_ = seq_length
lowerCAmelCase_ = is_training
lowerCAmelCase_ = use_labels
lowerCAmelCase_ = vocab_size
lowerCAmelCase_ = hidden_size
lowerCAmelCase_ = num_hidden_layers
lowerCAmelCase_ = num_attention_heads
lowerCAmelCase_ = intermediate_size
lowerCAmelCase_ = hidden_act
lowerCAmelCase_ = hidden_dropout_prob
lowerCAmelCase_ = attention_probs_dropout_prob
lowerCAmelCase_ = max_position_embeddings
lowerCAmelCase_ = eos_token_id
lowerCAmelCase_ = pad_token_id
lowerCAmelCase_ = bos_token_id
lowerCAmelCase_ = initializer_range
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.int64 )) , -1 )
        decoder_input_ids = shift_tokens_right(input_ids , 1 , 2 )
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=False , )
        inputs_dict = prepare_blenderbot_inputs_dict(config , input_ids , decoder_input_ids )
return config, inputs_dict
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config, inputs_dict = self.prepare_config_and_inputs()
return config, inputs_dict
    def check_use_cache_forward( self , model_class_name , config , inputs_dict ):
        '''simple docstring'''
        max_decoder_length = 20
        model = model_class_name(config )
        encoder_outputs = model.encode(inputs_dict['''input_ids'''] )
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict['''decoder_input_ids'''],
            inputs_dict['''decoder_attention_mask'''],
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0] , max_decoder_length , encoder_outputs )
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1] , encoder_outputs , decoder_attention_mask=decoder_attention_mask , past_key_values=past_key_values , decoder_position_ids=decoder_position_ids , )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:] , encoder_outputs , decoder_attention_mask=decoder_attention_mask , past_key_values=outputs_cache.past_key_values , decoder_position_ids=decoder_position_ids , )
        outputs = model.decode(decoder_input_ids , encoder_outputs )
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" )
    def check_use_cache_forward_with_attn_mask( self , model_class_name , config , inputs_dict ):
        '''simple docstring'''
        max_decoder_length = 20
        model = model_class_name(config )
        encoder_outputs = model.encode(inputs_dict['''input_ids'''] )
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict['''decoder_input_ids'''],
            inputs_dict['''decoder_attention_mask'''],
        )
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
            ] , axis=-1 , )
        past_key_values = model.init_cache(decoder_input_ids.shape[0] , max_decoder_length , encoder_outputs )
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1] , encoder_outputs , decoder_attention_mask=decoder_attention_mask_cache , past_key_values=past_key_values , decoder_position_ids=decoder_position_ids , )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:] , encoder_outputs , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=decoder_attention_mask_cache , decoder_position_ids=decoder_position_ids , )
        outputs = model.decode(decoder_input_ids , encoder_outputs , decoder_attention_mask=decoder_attention_mask )
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" )
@require_flax
class __a ( unittest.TestCase ):
    vocab_size = 99
    def _get_config_and_data( self ):
        '''simple docstring'''
        input_ids = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
            ] , dtype=np.int64 , )
        batch_size = input_ids.shape[0]
        config = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def lowerCamelCase_ ( self ):
'''simple docstring'''
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotSmallForConditionalGeneration(config )
        outputs = lm_model(input_ids=input_ids )
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs['''logits'''].shape , expected_shape )
def lowerCamelCase_ ( self ):
'''simple docstring'''
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
        lm_model = FlaxBlenderbotSmallForConditionalGeneration(config )
        input_ids = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.int64 )
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.int64 )
        outputs = lm_model(input_ids=input_ids , decoder_input_ids=summary )
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs['''logits'''].shape , expected_shape )
def lowerCamelCase_ ( self ):
'''simple docstring'''
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.int64 )
        shifted = shift_tokens_right(input_ids , 1 , 2 )
        n_pad_before = np.equal(input_ids , 1 ).astype(np.float32 ).sum()
        n_pad_after = np.equal(shifted , 1 ).astype(np.float32 ).sum()
        self.assertEqual(shifted.shape , input_ids.shape )
        self.assertEqual(n_pad_after , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class __a ( __UpperCAmelCase , unittest.TestCase , __UpperCAmelCase ):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotSmallModel,
            FlaxBlenderbotSmallForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()
def lowerCamelCase_ ( self ):
'''simple docstring'''
        self.model_tester = FlaxBlenderbotSmallModelTester(self )
def lowerCamelCase_ ( self ):
'''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class , config , inputs_dict )
def lowerCamelCase_ ( self ):
'''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class , config , inputs_dict )
def lowerCamelCase_ ( self ):
'''simple docstring'''
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowerCAmelCase_ = self._prepare_for_class(snake_case__ , snake_case__ )
lowerCAmelCase_ = model_class(snake_case__ )
@jax.jit
def encode_jitted(UpperCAmelCase , UpperCAmelCase=None , **UpperCAmelCase ):
return model.encode(input_ids=snake_case__ , attention_mask=snake_case__ )
with self.subTest('''JIT Enabled''' ):
lowerCAmelCase_ = encode_jitted(**snake_case__ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
lowerCAmelCase_ = encode_jitted(**snake_case__ ).to_tuple()
self.assertEqual(len(snake_case__ ) , len(snake_case__ ) )
for jitted_output, output in zip(snake_case__ , snake_case__ ):
self.assertEqual(jitted_output.shape , output.shape )
def lowerCamelCase_ ( self ):
'''simple docstring'''
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowerCAmelCase_ = model_class(snake_case__ )
lowerCAmelCase_ = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] )
lowerCAmelCase_ = {
'''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
'''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
'''encoder_outputs''': encoder_outputs,
}
@jax.jit
def decode_jitted(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
return model.decode(
decoder_input_ids=snake_case__ , decoder_attention_mask=snake_case__ , encoder_outputs=snake_case__ , )
with self.subTest('''JIT Enabled''' ):
lowerCAmelCase_ = decode_jitted(**snake_case__ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
lowerCAmelCase_ = decode_jitted(**snake_case__ ).to_tuple()
self.assertEqual(len(snake_case__ ) , len(snake_case__ ) )
for jitted_output, output in zip(snake_case__ , snake_case__ ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def lowerCamelCase_ ( self ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
lowerCAmelCase_ = model_class_name.from_pretrained('''facebook/blenderbot_small-90M''' )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
lowerCAmelCase_ = np.ones((1, 1) ) * model.config.eos_token_id
lowerCAmelCase_ = model(snake_case__ )
            self.assertIsNotNone(snake_case__ )
| 552 |
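# A standalone sketch of `shift_tokens_right`, the helper exercised by the tests
# above: the decoder input is the target shifted one position right, with the
# decoder-start token (2) in front; this mirrors the behavior the assertions
# check (pad id 1), not necessarily the library's exact implementation.
import numpy as np

def shift_tokens_right_sketch(input_ids, pad_token_id, decoder_start_token_id):
    shifted = np.zeros_like(input_ids)
    shifted[:, 1:] = input_ids[:, :-1]
    shifted[:, 0] = decoder_start_token_id
    # label-masking positions (-100) become real pad tokens
    return np.where(shifted == -100, pad_token_id, shifted)

ids = np.array([[71, 82, 18, 33, 2, 1, 1]])
print(shift_tokens_right_sketch(ids, 1, 2))  # [[ 2 71 82 18 33  2  1]]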
"""simple docstring"""
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def lowerCamelCase (a_ :int) -> List[str]:
random.seed(a_)
np.random.seed(a_)
torch.manual_seed(a_)
torch.cuda.manual_seed_all(a_)
# ^^ safe to call this function even if cuda is not available
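# For intuition, a worked example (in comments) of the warmup decay schedule that
# `ExponentialMovingAverage.get_decay` below implements when use_ema_warmup=True:
#
#   inv_gamma, power, max_decay = 1.0, 2 / 3, 0.9999
#   for step in (1, 10, 100, 1000):
#       cur = 1 - (1 + step / inv_gamma) ** -power
#       print(step, round(min(cur, max_decay), 4))   # -> 0.37, 0.7978, 0.9539, 0.99
#
# Early steps keep the average close to the live weights; later steps average
# over an increasingly long history, capped at the configured `decay`.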
class __magic_name__ :
    def __init__( self : Optional[Any] , parameters : Iterable[torch.nn.Parameter] , decay : float = 0.99_99 , min_decay : float = 0.0 , update_after_step : int = 0 , use_ema_warmup : bool = False , inv_gamma : Union[float, int] = 1.0 , power : Union[float, int] = 2 / 3 , model_cls : Optional[Any] = None , model_config : Dict[str, Any] = None , **kwargs : Tuple , ):
        '''simple docstring'''
        if isinstance(parameters , torch.nn.Module ):
            deprecation_message = (
                '''Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. '''
                '''Please pass the parameters of the module instead.'''
            )
            deprecate(
                '''passing a `torch.nn.Module` to `ExponentialMovingAverage`''' , '''1.0.0''' , deprecation_message , standard_warn=False , )
            parameters = parameters.parameters()
            # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
            use_ema_warmup = True
        if kwargs.get('''max_value''' , None ) is not None:
            deprecation_message = '''The `max_value` argument is deprecated. Please use `decay` instead.'''
            deprecate('''max_value''' , '''1.0.0''' , deprecation_message , standard_warn=False )
            decay = kwargs['''max_value''']
        if kwargs.get('''min_value''' , None ) is not None:
            deprecation_message = '''The `min_value` argument is deprecated. Please use `min_decay` instead.'''
            deprecate('''min_value''' , '''1.0.0''' , deprecation_message , standard_warn=False )
            min_decay = kwargs['''min_value''']
        parameters = list(parameters )
        self.shadow_params = [p.clone().detach() for p in parameters]
        if kwargs.get('''device''' , None ) is not None:
            deprecation_message = '''The `device` argument is deprecated. Please use `to` instead.'''
            deprecate('''device''' , '''1.0.0''' , deprecation_message , standard_warn=False )
            self.to(device=kwargs['''device'''] )
        self.temp_stored_params = None
        self.decay = decay
        self.min_decay = min_decay
        self.update_after_step = update_after_step
        self.use_ema_warmup = use_ema_warmup
        self.inv_gamma = inv_gamma
        self.power = power
        self.optimization_step = 0
        self.cur_decay_value = None  # set in `step()`
        self.model_cls = model_cls
        self.model_config = model_config
@classmethod
    def from_pretrained( cls : int , path : Tuple , model_cls : Union[str, Any] ):
        '''simple docstring'''
        _, ema_kwargs = model_cls.load_config(path , return_unused_kwargs=True )
        model = model_cls.from_pretrained(path )
        ema_model = cls(model.parameters() , model_cls=model_cls , model_config=model.config )
        ema_model.load_state_dict(ema_kwargs )
        return ema_model
    def save_pretrained( self : int , path : Union[str, Any] ):
        '''simple docstring'''
        if self.model_cls is None:
            raise ValueError('''`save_pretrained` can only be used if `model_cls` was defined at __init__.''' )
        if self.model_config is None:
            raise ValueError('''`save_pretrained` can only be used if `model_config` was defined at __init__.''' )
        model = self.model_cls.from_config(self.model_config )
        state_dict = self.state_dict()
        state_dict.pop('''shadow_params''' , None )
        model.register_to_config(**state_dict )
        self.copy_to(model.parameters() )
        model.save_pretrained(path )
    def get_decay( self : int , optimization_step : int ):
        '''simple docstring'''
        step = max(0 , optimization_step - self.update_after_step - 1 )
        if step <= 0:
            return 0.0
        if self.use_ema_warmup:
            cur_decay_value = 1 - (1 + step / self.inv_gamma) ** -self.power
        else:
            cur_decay_value = (1 + step) / (10 + step)
        cur_decay_value = min(cur_decay_value , self.decay )
        # make sure decay is not smaller than min_decay
        cur_decay_value = max(cur_decay_value , self.min_decay )
        return cur_decay_value
@torch.no_grad()
def __snake_case ( self : Any , snake_case__ : Iterable[torch.nn.Parameter] ):
'''simple docstring'''
if isinstance(snake_case__ , torch.nn.Module ):
lowercase :Tuple = (
'''Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. '''
'''Please pass the parameters of the module instead.'''
)
deprecate(
'''passing a `torch.nn.Module` to `ExponentialMovingAverage.step`''' , '''1.0.0''' , snake_case__ , standard_warn=snake_case__ , )
lowercase :Union[str, Any] = parameters.parameters()
lowercase :Optional[Any] = list(snake_case__ )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
lowercase :List[Any] = self.get_decay(self.optimization_step )
lowercase :Optional[Any] = decay
lowercase :List[Any] = 1 - decay
lowercase :List[str] = contextlib.nullcontext
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params , snake_case__ ):
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
lowercase :Union[str, Any] = deepspeed.zero.GatheredParameters(snake_case__ , modifier_rank=snake_case__ )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(snake_case__ )
def __snake_case ( self : str , snake_case__ : Iterable[torch.nn.Parameter] ):
'''simple docstring'''
lowercase :Optional[Any] = list(snake_case__ )
for s_param, param in zip(self.shadow_params , snake_case__ ):
param.data.copy_(s_param.to(param.device ).data )
def __snake_case ( self : Optional[int] , snake_case__ : Dict=None , snake_case__ : Dict=None ):
'''simple docstring'''
lowercase :str = [
p.to(device=snake_case__ , dtype=snake_case__ ) if p.is_floating_point() else p.to(device=snake_case__ )
for p in self.shadow_params
]
def __snake_case ( self : Dict ):
'''simple docstring'''
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def __snake_case ( self : Optional[int] , snake_case__ : Iterable[torch.nn.Parameter] ):
'''simple docstring'''
lowercase :str = [param.detach().cpu().clone() for param in parameters]
def __snake_case ( self : List[Any] , snake_case__ : Iterable[torch.nn.Parameter] ):
'''simple docstring'''
if self.temp_stored_params is None:
raise RuntimeError('''This ExponentialMovingAverage has no `store()`ed weights ''' '''to `restore()`''' )
for c_param, param in zip(self.temp_stored_params , snake_case__ ):
param.data.copy_(c_param.data )
# Better memory-wise.
lowercase :Dict = None
def __snake_case ( self : Union[str, Any] , snake_case__ : dict ):
'''simple docstring'''
lowercase :List[str] = copy.deepcopy(snake_case__ )
lowercase :Any = state_dict.get('''decay''' , self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError('''Decay must be between 0 and 1''' )
lowercase :int = state_dict.get('''min_decay''' , self.min_decay )
if not isinstance(self.min_decay , snake_case__ ):
raise ValueError('''Invalid min_decay''' )
lowercase :List[Any] = state_dict.get('''optimization_step''' , self.optimization_step )
if not isinstance(self.optimization_step , snake_case__ ):
raise ValueError('''Invalid optimization_step''' )
lowercase :int = state_dict.get('''update_after_step''' , self.update_after_step )
if not isinstance(self.update_after_step , snake_case__ ):
raise ValueError('''Invalid update_after_step''' )
lowercase :Optional[int] = state_dict.get('''use_ema_warmup''' , self.use_ema_warmup )
if not isinstance(self.use_ema_warmup , snake_case__ ):
raise ValueError('''Invalid use_ema_warmup''' )
lowercase :Any = state_dict.get('''inv_gamma''' , self.inv_gamma )
if not isinstance(self.inv_gamma , (float, int) ):
raise ValueError('''Invalid inv_gamma''' )
lowercase :Dict = state_dict.get('''power''' , self.power )
if not isinstance(self.power , (float, int) ):
raise ValueError('''Invalid power''' )
lowercase :Optional[int] = state_dict.get('''shadow_params''' , snake_case__ )
if shadow_params is not None:
lowercase :List[Any] = shadow_params
if not isinstance(self.shadow_params , snake_case__ ):
raise ValueError('''shadow_params must be a list''' )
if not all(isinstance(snake_case__ , torch.Tensor ) for p in self.shadow_params ):
raise ValueError('''shadow_params must all be Tensors''' )
| 677 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob(text: str) -> None:
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())
    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.
    # print entropy
    print(f"{round(-1 * my_fir_sum):.1f}")
    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each pair of alphas (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            sequence = cha + chb
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)
    # print second entropy
    print(f"{round(-1 * my_sec_sum):.1f}")
    # print the difference between them
    print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}")
def analyze_text(text: str) -> tuple[dict, dict]:
    """Count the frequency of single characters and of character pairs in the text."""
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1
    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings
def main() -> None:
    import doctest
    doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
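def _entropy_demo() -> None:
    # Editorial sketch, not part of the original module: a self-contained
    # check of the entropy formula H = -sum(p * log2(p)). The sample string
    # is an illustrative assumption.
    singles, _pairs = analyze_text("ab ab ab")
    total = sum(singles.values())
    h = -sum((c / total) * math.log2(c / total) for c in singles.values())
    # 'a' and 'b' each occur with p = 3/8 and ' ' with p = 2/8, so h ~= 1.561
    print(f"unigram entropy of 'ab ab ab': {h:.3f} bits/char")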
if __name__ == "__main__":
main()
| 418 |
"""simple docstring"""
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def t5x_relpos_bias_lookup(params, i, prefix):
    """Returns the Relative Position Bias parameters of a layer. Does not transpose."""
    return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]
def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :])
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
    return k, o, q, v
def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
        wi_1 = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]
    wo = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
    return wi, wo
def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]
def convert_t5x_to_pytorch(variables: dict, *, num_layers: int, is_encoder_only: bool, scalable_attention: bool = False):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}
    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)
    new = collections.OrderedDict()
    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]
    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T
        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                old, i, "encoder"
            ).T
    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]
    if not scalable_attention:
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "encoder"
        ).T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "decoder"
        ).T
    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T
            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T
            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T
            if scalable_attention:
                # convert the rel_embedding of each layer
                new[f"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                    old, i, "decoder"
                ).T
        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]
    # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
    if "decoder/logits_dense/kernel" in old:
        new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T
    return new
def make_state_dict(converted_params, is_encoder_only: bool):
    """Prepares a state dict of torch tensors for the PyTorch model."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])
    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]
    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]
    return state_dict
def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention):
    """Replaces the params in model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention
    )
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
def convert_t5x_checkpoint_to_pytorch(
    t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False, scalable_attention: bool = False
):
    config = MT5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMT5EncoderModel(config)
    else:
        model = UMT5ForConditionalGeneration(config)
    # Load weights from tf checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
)
parser.add_argument(
'''--scalable_attention''',
action='''store_true''',
help='''Whether the model uses scaled attention (umt5 model)''',
default=False,
)
    args = parser.parse_args()
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
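    # Example invocation (an editorial sketch; the script name and paths are
    # placeholders, not values from the original repository):
    #
    #   python convert_t5x_checkpoint_to_pytorch.py \
    #       --t5x_checkpoint_path /path/to/t5x_checkpoint \
    #       --config_file /path/to/config.json \
    #       --pytorch_dump_path ./converted_model \
    #       --scalable_attention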
| 677 | 0 |
'''simple docstring'''
from __future__ import annotations
arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def next_greatest_element_slow(arr: list[float]) -> list[float]:
    """Get the Next Greatest Element (NGE) for each element; O(n^2) brute force."""
    result = []
    arr_size = len(arr)
    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result
def next_greatest_element_fast(arr: list[float]) -> list[float]:
    """Like next_greatest_element_slow(), but with enumerate/slicing; still O(n^2)."""
    result = []
    for i, outer in enumerate(arr):
        next_item: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_item = inner
                break
        result.append(next_item)
    return result
def next_greatest_element(arr: list[float]) -> list[float]:
    """Get the NGE for each element with a monotonic stack; O(n) time and space."""
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size
    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
    setup = (
"from __main__ import arr, next_greatest_element_slow, "
"next_greatest_element_fast, next_greatest_element"
)
print(
"next_greatest_element_slow():",
timeit("next_greatest_element_slow(arr)", setup=setup),
)
print(
"next_greatest_element_fast():",
timeit("next_greatest_element_fast(arr)", setup=setup),
)
print(
" next_greatest_element():",
timeit("next_greatest_element(arr)", setup=setup),
)
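# Editorial note: the stack version is O(n) because every element is pushed
# exactly once and popped at most once, so the inner while-loop does at most
# n total pops across the whole scan. Quick sanity check of all three variants:
#   next_greatest_element([2, 7, 3, 5, 4, 6, 8]) == [7, 8, 5, 6, 6, 8, -1]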
| 120 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_blenderbot''': [
'''BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlenderbotConfig''',
'''BlenderbotOnnxConfig''',
],
'''tokenization_blenderbot''': ['''BlenderbotTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_blenderbot_fast"] = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot"] = [
'''BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlenderbotForCausalLM''',
'''BlenderbotForConditionalGeneration''',
'''BlenderbotModel''',
'''BlenderbotPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
'''TFBlenderbotForConditionalGeneration''',
'''TFBlenderbotModel''',
'''TFBlenderbotPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
'''FlaxBlenderbotForConditionalGeneration''',
'''FlaxBlenderbotModel''',
'''FlaxBlenderbotPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
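# Editorial sketch of the lazy-import mechanism relied on above. This is a toy
# re-implementation for illustration only, not transformers' actual _LazyModule:
# attribute access triggers the real submodule import on first use, so merely
# importing the package stays cheap.
#
#   import importlib
#   import types
#
#   class ToyLazyModule(types.ModuleType):
#       def __init__(self, name, import_structure):
#           super().__init__(name)
#           # invert {submodule: [names]} into {name: submodule}
#           self._name_to_module = {
#               attr: mod for mod, attrs in import_structure.items() for attr in attrs
#           }
#       def __getattr__(self, attr):
#           module = importlib.import_module("." + self._name_to_module[attr], self.__name__)
#           value = getattr(module, attr)
#           setattr(self, attr, value)  # cache so __getattr__ is skipped next time
#           return value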
| 677 | 0 |
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_atuple(x):
    """Return x unchanged if it is already iterable (e.g. an (h, w) pair), else (x, x)."""
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_flax
class lowerCamelCase_ :
def A ( self , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
pass
def A ( self ):
"""simple docstring"""
pass
def A ( self ):
"""simple docstring"""
pass
    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        """Assert that the max elementwise difference between two arrays is within tol."""
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , **__lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Tuple = VisionTextDualEncoderConfig.from_vision_text_configs(snake_case__ , snake_case__ )
__magic_name__ :List[Any] = FlaxVisionTextDualEncoderModel(snake_case__ )
__magic_name__ :Optional[Any] = model(input_ids=snake_case__ , pixel_values=snake_case__ , attention_mask=snake_case__ )
self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], config.projection_dim) )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , **__lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Any = self.get_vision_text_model(snake_case__ , snake_case__ )
__magic_name__ :Dict = {'''vision_model''': vision_model, '''text_model''': text_model}
__magic_name__ :List[str] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**snake_case__ )
__magic_name__ :Dict = model(input_ids=snake_case__ , pixel_values=snake_case__ , attention_mask=snake_case__ )
self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], model.config.projection_dim) )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , **__lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :List[Any] = self.get_vision_text_model(snake_case__ , snake_case__ )
__magic_name__ :Optional[Any] = {'''vision_model''': vision_model, '''text_model''': text_model}
__magic_name__ :Optional[Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**snake_case__ )
__magic_name__ :str = model(input_ids=snake_case__ , pixel_values=snake_case__ , attention_mask=snake_case__ )
__magic_name__ :Optional[int] = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(snake_case__ )
__magic_name__ :Optional[int] = FlaxVisionTextDualEncoderModel.from_pretrained(snake_case__ )
__magic_name__ :int = model(input_ids=snake_case__ , pixel_values=snake_case__ , attention_mask=snake_case__ )
__magic_name__ :Optional[int] = after_output[0]
__magic_name__ :Optional[int] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(snake_case__ , 1E-3 )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , **__lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :List[str] = self.get_vision_text_model(snake_case__ , snake_case__ )
__magic_name__ :Optional[Any] = {'''vision_model''': vision_model, '''text_model''': text_model}
__magic_name__ :Optional[Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**snake_case__ )
__magic_name__ :Any = model(
input_ids=snake_case__ , pixel_values=snake_case__ , attention_mask=snake_case__ , output_attentions=snake_case__ )
__magic_name__ :List[Any] = output.vision_model_output.attentions
self.assertEqual(len(snake_case__ ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
__magic_name__ :Optional[int] = to_atuple(vision_model.config.image_size )
__magic_name__ :str = to_atuple(vision_model.config.patch_size )
__magic_name__ :Union[str, Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
__magic_name__ :Tuple = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
__magic_name__ :Union[str, Any] = output.text_model_output.attentions
self.assertEqual(len(snake_case__ ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
pt_model.to(snake_case__ )
pt_model.eval()
# prepare inputs
__magic_name__ :Optional[Any] = inputs_dict
__magic_name__ :Any = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
__magic_name__ :int = pt_model(**snake_case__ ).to_tuple()
__magic_name__ :List[Any] = fx_model(**snake_case__ ).to_tuple()
self.assertEqual(len(snake_case__ ) , len(snake_case__ ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ):
self.assert_almost_equals(snake_case__ , pt_output.numpy() , 4E-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(snake_case__ )
__magic_name__ :Any = FlaxVisionTextDualEncoderModel.from_pretrained(snake_case__ , from_pt=snake_case__ )
__magic_name__ :Dict = fx_model_loaded(**snake_case__ ).to_tuple()
self.assertEqual(len(snake_case__ ) , len(snake_case__ ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ):
self.assert_almost_equals(snake_case__ , pt_output.numpy() , 4E-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(snake_case__ )
__magic_name__ :Any = VisionTextDualEncoderModel.from_pretrained(snake_case__ , from_flax=snake_case__ )
pt_model_loaded.to(snake_case__ )
pt_model_loaded.eval()
with torch.no_grad():
__magic_name__ :Union[str, Any] = pt_model_loaded(**snake_case__ ).to_tuple()
self.assertEqual(len(snake_case__ ) , len(snake_case__ ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ):
self.assert_almost_equals(snake_case__ , pt_output_loaded.numpy() , 4E-2 )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :List[str] = VisionTextDualEncoderConfig.from_vision_text_configs(snake_case__ , snake_case__ )
__magic_name__ :str = VisionTextDualEncoderModel(snake_case__ )
__magic_name__ :Optional[Any] = FlaxVisionTextDualEncoderModel(snake_case__ )
__magic_name__ :Optional[Any] = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , snake_case__ )
__magic_name__ :Any = fx_state
self.check_pt_flax_equivalence(snake_case__ , snake_case__ , snake_case__ )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :List[str] = VisionTextDualEncoderConfig.from_vision_text_configs(snake_case__ , snake_case__ )
__magic_name__ :int = VisionTextDualEncoderModel(snake_case__ )
__magic_name__ :Optional[int] = FlaxVisionTextDualEncoderModel(snake_case__ )
__magic_name__ :Any = load_flax_weights_in_pytorch_model(snake_case__ , fx_model.params )
self.check_pt_flax_equivalence(snake_case__ , snake_case__ , snake_case__ )
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[Any] = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**snake_case__ )
def A ( self ):
"""simple docstring"""
__magic_name__ :str = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**snake_case__ )
def A ( self ):
"""simple docstring"""
__magic_name__ :List[str] = self.prepare_config_and_inputs()
self.check_save_load(**snake_case__ )
def A ( self ):
"""simple docstring"""
__magic_name__ :Dict = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**snake_case__ )
@is_pt_flax_cross_test
def A ( self ):
"""simple docstring"""
__magic_name__ :Any = self.prepare_config_and_inputs()
__magic_name__ :int = config_inputs_dict.pop('''vision_config''' )
__magic_name__ :Optional[int] = config_inputs_dict.pop('''text_config''' )
__magic_name__ :Dict = config_inputs_dict
self.check_equivalence_pt_to_flax(snake_case__ , snake_case__ , snake_case__ )
self.check_equivalence_flax_to_pt(snake_case__ , snake_case__ , snake_case__ )
@slow
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[int] = self.get_pretrained_model_and_inputs()
__magic_name__ :List[str] = model_a(**snake_case__ )
__magic_name__ :List[str] = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(snake_case__ )
__magic_name__ :int = FlaxVisionTextDualEncoderModel.from_pretrained(snake_case__ )
__magic_name__ :Any = model_a(**snake_case__ )
__magic_name__ :List[str] = after_outputs[0]
__magic_name__ :List[str] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(snake_case__ , 1E-5 )
@require_flax
class lowerCamelCase_ ( __UpperCAmelCase , unittest.TestCase ):
def A ( self ):
"""simple docstring"""
__magic_name__ :Any = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
'''hf-internal-testing/tiny-random-vit''' , '''hf-internal-testing/tiny-bert''' , vision_from_pt=snake_case__ , text_from_pt=snake_case__ , )
__magic_name__ :List[Any] = 1_3
__magic_name__ :Optional[Any] = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
__magic_name__ :Union[str, Any] = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
__magic_name__ :Dict = random_attention_mask([batch_size, 4] )
__magic_name__ :Optional[Any] = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def A ( self , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Dict = FlaxViTModel(snake_case__ )
__magic_name__ :Optional[int] = FlaxBertModel(snake_case__ )
return vision_model, text_model
def A ( self ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = FlaxViTModelTester(self )
__magic_name__ :Union[str, Any] = FlaxBertModelTester(self )
__magic_name__ :Union[str, Any] = vit_model_tester.prepare_config_and_inputs()
__magic_name__ :Optional[Any] = bert_model_tester.prepare_config_and_inputs()
__magic_name__ :List[Any] = vision_config_and_inputs
__magic_name__ :int = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class lowerCamelCase_ ( __UpperCAmelCase , unittest.TestCase ):
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
'''hf-internal-testing/tiny-random-clip''' , '''hf-internal-testing/tiny-bert''' , vision_from_pt=snake_case__ , text_from_pt=snake_case__ , )
__magic_name__ :Any = 1_3
__magic_name__ :int = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
__magic_name__ :Union[str, Any] = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
__magic_name__ :Union[str, Any] = random_attention_mask([batch_size, 4] )
__magic_name__ :Optional[int] = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def A ( self , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Dict = FlaxCLIPVisionModel(snake_case__ )
__magic_name__ :int = FlaxBertModel(snake_case__ )
return vision_model, text_model
def A ( self ):
"""simple docstring"""
__magic_name__ :List[Any] = FlaxCLIPVisionModelTester(self )
__magic_name__ :List[str] = FlaxBertModelTester(self )
__magic_name__ :Any = clip_model_tester.prepare_config_and_inputs()
__magic_name__ :Union[str, Any] = bert_model_tester.prepare_config_and_inputs()
__magic_name__ :Tuple = vision_config_and_inputs
__magic_name__ :Optional[Any] = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class lowerCamelCase_ ( unittest.TestCase ):
@slow
def A ( self ):
"""simple docstring"""
__magic_name__ :str = FlaxVisionTextDualEncoderModel.from_pretrained('''clip-italian/clip-italian''' , logit_scale_init_value=1.0 )
__magic_name__ :List[str] = VisionTextDualEncoderProcessor.from_pretrained('''clip-italian/clip-italian''' )
__magic_name__ :List[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
__magic_name__ :Any = processor(
text=['''una foto di un gatto''', '''una foto di un cane'''] , images=snake_case__ , padding=snake_case__ , return_tensors='''np''' )
__magic_name__ :Tuple = model(**snake_case__ )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
__magic_name__ :Optional[Any] = np.array([[1.2284727, 0.3104122]] )
self.assertTrue(np.allclose(outputs.logits_per_image , snake_case__ , atol=1E-3 ) )
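# Editorial sketch of the PT<->Flax equivalence pattern these tests exercise
# (names mirror the helpers imported above; the tolerance follows the 4e-2
# used in the assertions):
#
#   fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
#   pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)
#   self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2)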
| 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''naver-clova-ix/donut-base''': '''https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json''',
# See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class DonutSwinConfig(PretrainedConfig):
    model_type = "donut-swin"
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
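# Usage sketch (editorial note): with the defaults above, the derived channel
# dimension after the last Swin stage is embed_dim * 2 ** (num_layers - 1):
#
#   config = DonutSwinConfig()
#   config.hidden_size  # 96 * 2**3 == 768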
| 677 | 0 |
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate the polynomial whose coefficient of x**i is poly[i], at point x."""
    return sum(c * (x**i) for i, c in enumerate(poly))
def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate the same polynomial with Horner's method, using O(n) multiplications."""
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result
if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)  # f(x) = 5x^2 + 9.3x^3 + 7x^4
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
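    # Worked check of Horner's rule (editorial note): the loop computes
    # x*(x*(x*(x*7 + 9.3) + 5.0) + 0.0) + 0.0, so at x = 10 both functions
    # give (((7*10 + 9.3)*10 + 5)*10 + 0)*10 + 0 = 79_800.0, up to float rounding.
    assert round(evaluate_poly(poly, x), 6) == round(horner(poly, x), 6) == 79_800.0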
| 424 |
"""simple docstring"""
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )
@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    pipeline = StableDiffusionPipeline.from_pretrained(model_path, torch_dtype=dtype).to(device)
    output_path = Path(output_path)
# TEXT ENCODER
lowercase :List[Any] = pipeline.text_encoder.config.max_position_embeddings
lowercase :Dict = pipeline.text_encoder.config.hidden_size
lowercase :Union[str, Any] = pipeline.tokenizer(
'''A sample prompt''' , padding='''max_length''' , max_length=pipeline.tokenizer.model_max_length , truncation=a_ , return_tensors='''pt''' , )
onnx_export(
pipeline.text_encoder , model_args=(text_input.input_ids.to(device=a_ , dtype=torch.intaa)) , output_path=output_path / '''text_encoder''' / '''model.onnx''' , ordered_input_names=['''input_ids'''] , output_names=['''last_hidden_state''', '''pooler_output'''] , dynamic_axes={
'''input_ids''': {0: '''batch''', 1: '''sequence'''},
} , opset=a_ , )
del pipeline.text_encoder
# UNET
lowercase :Any = pipeline.unet.config.in_channels
lowercase :List[Any] = pipeline.unet.config.sample_size
lowercase :Optional[int] = output_path / '''unet''' / '''model.onnx'''
onnx_export(
pipeline.unet , model_args=(
torch.randn(2 , a_ , a_ , a_).to(device=a_ , dtype=a_),
torch.randn(2).to(device=a_ , dtype=a_),
torch.randn(2 , a_ , a_).to(device=a_ , dtype=a_),
False,
) , output_path=a_ , ordered_input_names=['''sample''', '''timestep''', '''encoder_hidden_states''', '''return_dict'''] , output_names=['''out_sample'''] , dynamic_axes={
'''sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
'''timestep''': {0: '''batch'''},
'''encoder_hidden_states''': {0: '''batch''', 1: '''sequence'''},
} , opset=a_ , use_external_data_format=a_ , )
lowercase :List[Any] = str(unet_path.absolute().as_posix())
lowercase :str = os.path.dirname(a_)
lowercase :Optional[Any] = onnx.load(a_)
# clean up existing tensor files
shutil.rmtree(a_)
os.mkdir(a_)
# collate external tensor files into one
onnx.save_model(
a_ , a_ , save_as_external_data=a_ , all_tensors_to_one_file=a_ , location='''weights.pb''' , convert_attribute=a_ , )
del pipeline.unet
# VAE ENCODER
lowercase :Tuple = pipeline.vae
lowercase :Optional[Any] = vae_encoder.config.in_channels
lowercase :Any = vae_encoder.config.sample_size
# need to get the raw tensor output (sample) from the encoder
lowercase :Any = lambda a_ , a_: vae_encoder.encode(a_ , a_)[0].sample()
onnx_export(
a_ , model_args=(
torch.randn(1 , a_ , a_ , a_).to(device=a_ , dtype=a_),
False,
) , output_path=output_path / '''vae_encoder''' / '''model.onnx''' , ordered_input_names=['''sample''', '''return_dict'''] , output_names=['''latent_sample'''] , dynamic_axes={
'''sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
} , opset=a_ , )
# VAE DECODER
lowercase :Any = pipeline.vae
lowercase :Dict = vae_decoder.config.latent_channels
lowercase :Union[str, Any] = vae_decoder.config.out_channels
# forward only through the decoder part
lowercase :List[Any] = vae_encoder.decode
onnx_export(
a_ , model_args=(
torch.randn(1 , a_ , a_ , a_).to(device=a_ , dtype=a_),
False,
) , output_path=output_path / '''vae_decoder''' / '''model.onnx''' , ordered_input_names=['''latent_sample''', '''return_dict'''] , output_names=['''sample'''] , dynamic_axes={
'''latent_sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
} , opset=a_ , )
del pipeline.vae
# SAFETY CHECKER
if pipeline.safety_checker is not None:
lowercase :Dict = pipeline.safety_checker
lowercase :str = safety_checker.config.vision_config.num_channels
lowercase :str = safety_checker.config.vision_config.image_size
lowercase :List[str] = safety_checker.forward_onnx
onnx_export(
pipeline.safety_checker , model_args=(
torch.randn(
1 , a_ , a_ , a_ , ).to(device=a_ , dtype=a_),
torch.randn(1 , a_ , a_ , a_).to(device=a_ , dtype=a_),
) , output_path=output_path / '''safety_checker''' / '''model.onnx''' , ordered_input_names=['''clip_input''', '''images'''] , output_names=['''out_images''', '''has_nsfw_concepts'''] , dynamic_axes={
'''clip_input''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
'''images''': {0: '''batch''', 1: '''height''', 2: '''width''', 3: '''channels'''},
} , opset=a_ , )
del pipeline.safety_checker
lowercase :Tuple = OnnxRuntimeModel.from_pretrained(output_path / '''safety_checker''')
lowercase :Optional[Any] = pipeline.feature_extractor
else:
lowercase :int = None
lowercase :Union[str, Any] = None
lowercase :Optional[int] = OnnxStableDiffusionPipeline(
vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / '''vae_encoder''') , vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / '''vae_decoder''') , text_encoder=OnnxRuntimeModel.from_pretrained(output_path / '''text_encoder''') , tokenizer=pipeline.tokenizer , unet=OnnxRuntimeModel.from_pretrained(output_path / '''unet''') , scheduler=pipeline.scheduler , safety_checker=a_ , feature_extractor=a_ , requires_safety_checker=safety_checker is not None , )
onnx_pipeline.save_pretrained(a_)
print('''ONNX pipeline saved to''' , a_)
del pipeline
del onnx_pipeline
lowercase :Tuple = OnnxStableDiffusionPipeline.from_pretrained(a_ , provider='''CPUExecutionProvider''')
print('''ONNX pipeline is loadable''')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_path''',
type=str,
required=True,
help='''Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).''',
)
parser.add_argument('''--output_path''', type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--opset''',
default=14,
type=int,
help='''The version of the ONNX operator set to use.''',
)
parser.add_argument('''--fp16''', action='''store_true''', default=False, help='''Export the models in `float16` mode''')
    args = parser.parse_args()
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
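    # Example invocation (editorial sketch; the script name, model id and
    # output path are placeholders):
    #
    #   python convert_stable_diffusion_checkpoint_to_onnx.py \
    #       --model_path CompVis/stable-diffusion-v1-4 \
    #       --output_path ./sd_onnx \
    #       --opset 14 \
    #       --fp16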
| 677 | 0 |
'''simple docstring'''
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
_SCREAMING_SNAKE_CASE : Optional[int] = "Create a default config file for Accelerate with only a few flags set."
def UpperCamelCase_( snake_case : int="no" , snake_case : str = default_json_config_file , snake_case : bool = False ):
'''simple docstring'''
snake_case_ = Path(a_ )
path.parent.mkdir(parents=a_ , exist_ok=a_ )
if path.exists():
print(
f'Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.' )
return False
snake_case_ = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
f'`mixed_precision` should be one of \'no\', \'fp16\', \'bf16\', or \'fp8\'. Received {mixed_precision}' )
snake_case_ = {
'''compute_environment''': '''LOCAL_MACHINE''',
'''mixed_precision''': mixed_precision,
}
if torch.cuda.is_available():
snake_case_ = torch.cuda.device_count()
snake_case_ = num_gpus
snake_case_ = False
if num_gpus > 1:
snake_case_ = '''MULTI_GPU'''
else:
snake_case_ = '''NO'''
elif is_xpu_available() and use_xpu:
snake_case_ = torch.xpu.device_count()
snake_case_ = num_xpus
snake_case_ = False
if num_xpus > 1:
snake_case_ = '''MULTI_XPU'''
else:
snake_case_ = '''NO'''
elif is_npu_available():
snake_case_ = torch.npu.device_count()
snake_case_ = num_npus
snake_case_ = False
if num_npus > 1:
snake_case_ = '''MULTI_NPU'''
else:
snake_case_ = '''NO'''
else:
snake_case_ = 0
snake_case_ = True
snake_case_ = 1
snake_case_ = '''NO'''
snake_case_ = ClusterConfig(**a_ )
config.to_json_file(a_ )
return path
def default_command_parser(parser, parents):
    parser = parser.add_parser("default", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
    parser.add_argument(
        "--config_file",
        default=default_json_config_file,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
        dest="save_location",
    )
    parser.add_argument(
        "--mixed_precision",
        choices=["no", "fp16", "bf16"],
        type=str,
        help="Whether or not to use mixed precision training. "
        "Choose between FP16 and BF16 (bfloat16) training. "
        "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.",
        default="no",
    )
    parser.set_defaults(func=default_config_command)
    return parser
def default_config_command(args):
    config_file = write_basic_config(args.mixed_precision, args.save_location)
    if config_file:
        print(f"accelerate configuration saved at {config_file}")
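# Shape of the JSON this writes on a single-GPU machine (editorial sketch;
# ClusterConfig may serialize additional defaulted fields):
#
#   {
#     "compute_environment": "LOCAL_MACHINE",
#     "distributed_type": "NO",
#     "mixed_precision": "no",
#     "num_processes": 1,
#     "use_cpu": false
#   }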
| 400 |
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )
def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
    def __len__(self):
        return len(self.src_lens)
    def __getitem__(self, index) -> Dict[str, torch.Tensor]:
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"
        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token
        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")
        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }
    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]
    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)
def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))
def save_git_info(folder_path: str) -> None:
    """Save git information to folder_path/git_log.json."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))
def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)
def load_json(path):
    with open(path) as f:
        return json.load(f)
def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos
def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))
def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""
    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)
    def white_space_fix(text):
        return " ".join(text.split())
    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)
    def lower(text):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(s))))
def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)
def calculate_exact_match(output_lns, reference_lns):
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}
def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")
def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
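# Quick usage sketch of the metrics above (editorial addition; values follow
# directly from the definitions):
#   f1_score("New York City", "new york city!")                      -> 1.0
#   calculate_exact_match(["Paris", "london"], ["paris", "London"])  -> {"em": 1.0}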
| 677 | 0 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = "cuda" if torch.cuda.is_available() else "cpu"
def split_text(text: str, n=100, character=" "):
    """Split the text every n-th occurrence of character."""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]
def split_documents(documents: dict) -> dict:
    """Split documents into passages."""
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}
def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    """Compute the DPR embeddings of document passages."""
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt"
    )["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def main(rag_example_args, processing_args, index_hnsw_args):
    ######################################
    logger.info("Step 1 - Create the dataset")
    ######################################

    # The dataset needed for RAG must have three columns:
    # - title (string): title of the document
    # - text (string): text of a passage of the document
    # - embeddings (array of dimension d): DPR representation of the passage
    # Let's say you have documents in tab-separated csv files with columns "title" and "text"
    assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"

    # You can load a Dataset object this way
    dataset = load_dataset(
        "csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"]
    )

    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files

    # Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)

    # And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}
    )  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer),
        batched=True,
        batch_size=processing_args.batch_size,
        features=new_features,
    )

    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset")
    dataset.save_to_disk(passages_path)
    # from datasets import load_from_disk
    # dataset = load_from_disk(passages_path)  # to reload the dataset

    ######################################
    logger.info("Step 2 - Index the dataset")
    ######################################

    # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index("embeddings", custom_index=index)

    # And save the index
    index_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss")
    dataset.get_index("embeddings").save(index_path)
    # dataset.load_faiss_index("embeddings", index_path)  # to reload the index
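    # A hedged sketch of querying the saved index afterwards; `get_nearest_examples` is the
    # `datasets` API for a dataset carrying a faiss index, and `question_embedding` below is
    # assumed to come from a DPR question encoder:
    #   scores, retrieved_examples = dataset.get_nearest_examples("embeddings", question_embedding, k=5)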
@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"),
        metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"},
    )
    question: Optional[str] = field(
        default=None,
        metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."},
    )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq",
        metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"},
    )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base",
        metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        },
    )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"),
        metadata={"help": "Path to a directory where the dataset passages and the index will be saved"},
    )


@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        },
    )
    batch_size: int = field(
        default=16,
        metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        },
    )


@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768,
        metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."},
    )
    m: int = field(
        default=128,
        metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        },
    )
if __name__ == "__main__":
    logging.basicConfig(level=logging.WARNING)
    logger.setLevel(logging.INFO)

    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
        main(rag_example_args, processing_args, index_hnsw_args)
| 263 |
"""simple docstring"""
def move_tower(height, from_pole, to_pole, with_pole):
    if height >= 1:
        move_tower(height - 1, from_pole, with_pole, to_pole)
        move_disk(from_pole, to_pole)
        move_tower(height - 1, with_pole, to_pole, from_pole)


def move_disk(fp, tp):
    print("moving disk from", fp, "to", tp)


def main():
    height = int(input("Height of hanoi: ").strip())
    move_tower(height, "A", "B", "C")
if __name__ == "__main__":
main()
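# Illustrative trace (not in the original script) for a tower of height 2,
# moved from pole A to pole B with C as the spare:
#   moving disk from A to C
#   moving disk from A to B
#   moving disk from C to B
# A tower of height n always takes 2**n - 1 disk moves.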
| 677 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vit-base-patch16-224": "https://huggingface.co/vit-base-patch16-224/resolve/main/config.json",
    # See all ViT models at https://huggingface.co/models?filter=vit
}


class ViTConfig(PretrainedConfig):
    model_type = "vit"

    def __init__(
        self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072,
        hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02,
        layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class ViTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
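# A hedged usage sketch: with this ONNX config in place, a checkpoint can be exported
# via the `transformers.onnx` CLI that shipped alongside it, e.g.
#   python -m transformers.onnx --model=google/vit-base-patch16-224 onnx/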
| 19 |
"""simple docstring"""
from sklearn.metrics import mean_squared_error
import datasets
_CITATION = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
_DESCRIPTION = '''\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
"raw_values" : Returns a full set of errors in case of multioutput input.
"uniform_average" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric("mse")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{\'mse\': 0.6123724356957945}
If you\'re using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric("mse", "multilist")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{\'mse\': array([0.41666667, 1. ])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mse(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
            ],
        )

    def _get_feature_types(self):
        if self.config_name == "multilist":
            return {
                "predictions": datasets.Sequence(datasets.Value("float")),
                "references": datasets.Sequence(datasets.Value("float")),
            }
        else:
            return {
                "predictions": datasets.Value("float"),
                "references": datasets.Value("float"),
            }

    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )

        return {"mse": mse}
| 677 | 0 |
"""simple docstring"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    max_seq_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    language: str = field(
        default=None, metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."}
    )
    train_language: Optional[str] = field(
        default=None, metadata={"help": "Train language if it is different from the evaluation language."}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    do_lower_case: Optional[bool] = field(
        default=False, metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"},
    )
    use_fast_tokenizer: bool = field(
        default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False, metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_xnli", model_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
    # Downloading and loading xnli dataset from the hub.
    if training_args.do_train:
        if model_args.train_language is None:
            train_dataset = load_dataset(
                "xnli", model_args.language, split="train", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None,
            )
        else:
            train_dataset = load_dataset(
                "xnli", model_args.train_language, split="train", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None,
            )
        label_list = train_dataset.features["label"].names

    if training_args.do_eval:
        eval_dataset = load_dataset(
            "xnli", model_args.language, split="validation", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None,
        )
        label_list = eval_dataset.features["label"].names

    if training_args.do_predict:
        predict_dataset = load_dataset(
            "xnli", model_args.language, split="test", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None,
        )
        label_list = predict_dataset.features["label"].names

    # Labels
    num_labels = len(label_list)

    # Load pretrained model and tokenizer
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, id2label={str(i): label for i, label in enumerate(label_list)}, label2id={label: i for i, label in enumerate(label_list)}, finetuning_task="xnli", cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, do_lower_case=model_args.do_lower_case, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )

    # Preprocessing the datasets
    # Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    def preprocess_function(examples):
        # Tokenize the texts
        return tokenizer(
            examples["premise"], examples["hypothesis"], padding=padding, max_length=data_args.max_seq_length, truncation=True,
        )

    if training_args.do_train:
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on train dataset",
            )
        # Log a few random samples from the training set:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on validation dataset",
            )

    if training_args.do_predict:
        if data_args.max_predict_samples is not None:
            max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
            predict_dataset = predict_dataset.select(range(max_predict_samples))
        with training_args.main_process_first(desc="prediction dataset map pre-processing"):
            predict_dataset = predict_dataset.map(
                preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on prediction dataset",
            )

    # Get the metric function
    metric = evaluate.load("xnli")

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return metric.compute(predictions=preds, references=p.label_ids)

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=tokenizer, data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(eval_dataset=eval_dataset)

        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Prediction
    if training_args.do_predict:
        logger.info("*** Predict ***")
        predictions, labels, metrics = trainer.predict(predict_dataset, metric_key_prefix="predict")

        max_predict_samples = (
            data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset)
        )
        metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset))

        trainer.log_metrics("predict", metrics)
        trainer.save_metrics("predict", metrics)

        predictions = np.argmax(predictions, axis=1)
        output_predict_file = os.path.join(training_args.output_dir, "predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")


if __name__ == "__main__":
    main()
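# A hedged usage sketch (checkpoint name and paths are illustrative):
#   python run_xnli.py --model_name_or_path bert-base-multilingual-cased \
#       --language de --train_language en --do_train --do_eval \
#       --per_device_train_batch_size 32 --output_dir /tmp/debug_xnli/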
| 586 |
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser


class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
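# A minimal illustrative subclass (the command name and behaviour are hypothetical),
# showing how concrete commands plug into the two abstract hooks:
#
#   class HelloCommand(BaseTransformersCLICommand):
#       @staticmethod
#       def register_subcommand(parser: ArgumentParser):
#           hello_parser = parser.add_parser("hello")
#           hello_parser.set_defaults(func=lambda args: HelloCommand())
#
#       def run(self):
#           print("hello")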
| 677 | 0 |
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class TFMobileBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFMobileBertModel,
            TFMobileBertForMaskedLM,
            TFMobileBertForNextSentencePrediction,
            TFMobileBertForPreTraining,
            TFMobileBertForQuestionAnswering,
            TFMobileBertForSequenceClassification,
            TFMobileBertForTokenClassification,
            TFMobileBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFMobileBertModel,
            "fill-mask": TFMobileBertForMaskedLM,
            "question-answering": TFMobileBertForQuestionAnswering,
            "text-classification": TFMobileBertForSequenceClassification,
            "token-classification": TFMobileBertForTokenClassification,
            "zero-shot": TFMobileBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    # special case for the ForPreTraining model, which expects a `next_sentence_label`
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
        return inputs_dict

    class TFMobileBertModelTester(object):
        def __init__(
            self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
            use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, embedding_size=32,
            num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu",
            hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
            type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3,
            num_choices=4, scope=None,
        ):
            self.parent = parent
            self.batch_size = batch_size
            self.seq_length = seq_length
            self.is_training = is_training
            self.use_input_mask = use_input_mask
            self.use_token_type_ids = use_token_type_ids
            self.use_labels = use_labels
            self.vocab_size = vocab_size
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.intermediate_size = intermediate_size
            self.hidden_act = hidden_act
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.type_sequence_label_size = type_sequence_label_size
            self.initializer_range = initializer_range
            self.num_labels = num_labels
            self.num_choices = num_choices
            self.scope = scope
            self.embedding_size = embedding_size

        def prepare_config_and_inputs(self):
            input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

            input_mask = None
            if self.use_input_mask:
                input_mask = random_attention_mask([self.batch_size, self.seq_length])

            token_type_ids = None
            if self.use_token_type_ids:
                token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

            sequence_labels = None
            token_labels = None
            choice_labels = None
            if self.use_labels:
                sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
                token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
                choice_labels = ids_tensor([self.batch_size], self.num_choices)

            config = MobileBertConfig(
                vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, embedding_size=self.embedding_size,
            )

            return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        def create_and_check_mobilebert_model(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertModel(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)

            inputs = [input_ids, input_mask]
            result = model(inputs)

            result = model(input_ids)

            self.parent.assertEqual(
                result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)
            )
            self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

        def create_and_check_mobilebert_for_masked_lm(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForMaskedLM(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

        def create_and_check_mobilebert_for_next_sequence_prediction(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForNextSentencePrediction(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_pretraining(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForPreTraining(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(
                result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size)
            )
            self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_sequence_classification(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_labels = self.num_labels
            model = TFMobileBertForSequenceClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        def create_and_check_mobilebert_for_multiple_choice(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_choices = self.num_choices
            model = TFMobileBertForMultipleChoice(config=config)
            multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
            multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
            multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
            inputs = {
                "input_ids": multiple_choice_inputs_ids,
                "attention_mask": multiple_choice_input_mask,
                "token_type_ids": multiple_choice_token_type_ids,
            }
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

        def create_and_check_mobilebert_for_token_classification(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_labels = self.num_labels
            model = TFMobileBertForTokenClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

        def create_and_check_mobilebert_for_question_answering(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForQuestionAnswering(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
            self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

        def prepare_config_and_inputs_for_common(self):
            config_and_inputs = self.prepare_config_and_inputs()
            (
                config,
                input_ids,
                token_type_ids,
                input_mask,
                sequence_labels,
                token_labels,
                choice_labels,
            ) = config_and_inputs
            inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
            return config, inputs_dict

    def setUp(self):
        self.model_tester = TFMobileBertModelTest.TFMobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["google/mobilebert-uncased"]:
            model = TFMobileBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_tf
class TFMobileBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 30522]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 204 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'''configuration_encodec''': [
'''ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EncodecConfig''',
],
'''feature_extraction_encodec''': ['''EncodecFeatureExtractor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encodec"] = [
'''ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EncodecModel''',
'''EncodecPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
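    # With the lazy module installed in sys.modules, `from transformers.models.encodec
    # import EncodecModel` only triggers the torch-dependent import on first access.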
| 677 | 0 |
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMAEModelTester:
    def __init__(
        self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True,
        use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37,
        hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10, initializer_range=0.02, num_labels=3, mask_ratio=0.6, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        self.num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (self.num_patches + 1)))
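        # e.g. with the defaults above: (30 // 2) ** 2 = 225 patches, and
        # ceil((1 - 0.6) * (225 + 1)) = ceil(90.4) = 91 for the expected sequence length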
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, mask_ratio=self.mask_ratio,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": ViTMAEModel} if is_torch_available() else {}

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="""ViTMAE does not use inputs_embeds""")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
    def check_pt_tf_models(self, tf_model, pt_model, pt_inputs_dict):
        # make masks reproducible
        np.random.seed(2)

        num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        pt_noise = torch.from_numpy(noise)

        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        pt_inputs_dict["noise"] = pt_noise

        super().check_pt_tf_models(tf_model, pt_model, pt_inputs_dict)

    def test_save_load(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2)
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            out_2 = outputs[0].cpu().numpy()
            out_2[np.isnan(out_2)] = 0

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)
                model.to(torch_device)
                # make random mask reproducible
                torch.manual_seed(2)
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict, model_class))

                # Make sure we don't have nans
                out_1 = after_outputs[0].cpu().numpy()
                out_1[np.isnan(out_1)] = 0
                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)
    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results."""
    )
    def test_determinism(self):
        pass

    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results."""
    )
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results."""
    )
    def test_save_load_fast_init_to_base(self):
        pass

    @unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""")
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""")
    def test_model_is_small(self):
        pass
    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across runs
        np.random.seed(2)

        model = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs, noise=torch.from_numpy(noise).to(device=torch_device))

        # verify the logits
        expected_shape = torch.Size((1, 196, 768))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
        )

        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice.to(torch_device), atol=1e-4))
| 45 |
"""simple docstring"""
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __magic_name__ ( unittest.TestCase ):
def __init__( self : List[Any] , snake_case__ : Optional[int] , snake_case__ : List[str]=3 , snake_case__ : int=3_2 , snake_case__ : int=3 , snake_case__ : str=1_0 , snake_case__ : str=[1_0, 2_0, 3_0, 4_0] , snake_case__ : int=[1, 1, 2, 1] , snake_case__ : List[Any]=True , snake_case__ : Tuple=True , snake_case__ : Optional[Any]="relu" , snake_case__ : Optional[int]=3 , snake_case__ : Optional[Any]=None , ):
'''simple docstring'''
lowercase :Union[str, Any] = parent
lowercase :Optional[Any] = batch_size
lowercase :Dict = image_size
lowercase :Any = num_channels
lowercase :List[str] = embeddings_size
lowercase :Union[str, Any] = hidden_sizes
lowercase :Any = depths
lowercase :Dict = is_training
lowercase :Any = use_labels
lowercase :Any = hidden_act
lowercase :List[str] = num_labels
lowercase :List[Any] = scope
lowercase :int = len(snake_case__ )
def __snake_case ( self : Any ):
'''simple docstring'''
lowercase :Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase :Union[str, Any] = self.get_config()
return config, pixel_values
def __snake_case ( self : Dict ):
'''simple docstring'''
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
    def create_and_check_model(self, config, pixel_values):
        model = FlaxRegNetModel(config=config)
        result = model(pixel_values)

        # Output shape (b, c, h, w)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()

    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False

    def setUp(self) -> None:
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")
        outputs = model(**inputs)
        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])
        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
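
# A minimal, hedged sketch of the jitted-vs-eager comparison performed in
# test_jit_compilation above; the toy `forward` stands in for the model and is
# an assumption, not part of the test suite.
def _jit_demo():
    def forward(pixel_values):
        # stand-in for a model forward pass
        return jnp.tanh(pixel_values) * 2.0

    jitted = jax.jit(forward)
    x = jnp.ones((1, 3, 32, 32))
    with jax.disable_jit():
        eager_out = jitted(x)  # runs op by op, easier to debug
    jit_out = jitted(x)  # traced and compiled with XLA
    assert jit_out.shape == eager_out.shape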
| 677 | 0 |
"""simple docstring"""
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
logger = logging.getLogger(__name__)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description='Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'
    )
    parser.add_argument(
        '--data_file', type=str, default='data/dump.bert-base-uncased.pickle', help='The binarized dataset.'
    )
    parser.add_argument(
        '--token_counts_dump', type=str, default='data/token_counts.bert-base-uncased.pickle', help='The dump file.'
    )
    parser.add_argument('--vocab_size', default=30_522, type=int)
    args = parser.parse_args()

    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, 'rb') as fp:
        data = pickle.load(fp)

    logger.info('Counting occurrences for MLM.')
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)

    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v

    logger.info(f"Dump to {args.token_counts_dump}")
    with open(args.token_counts_dump, 'wb') as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL) | 552 |
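
# Hedged aside: one common way such token counts are consumed downstream for
# MLM mask smoothing. The word2vec-style exponent is illustrative; the exact
# downstream script is an assumption, not shown above.
import numpy as np

toy_counts = [0, 150, 9_000, 3]  # stand-in for the pickled `counts` list
freqs = np.maximum(np.array(toy_counts, dtype=np.float64), 1)
smoothed = freqs**-0.7  # rarer tokens get proposed for masking more often
smoothed = smoothed / smoothed.sum()  # normalize into a sampling distribution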
"""simple docstring"""
demo_graph = {
'''A''': ['''B''', '''C''', '''E'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F''', '''G'''],
'''D''': ['''B'''],
'''E''': ['''A''', '''B''', '''D'''],
'''F''': ['''C'''],
'''G''': ['''C'''],
}
def bfs_shortest_path(graph: dict, start, goal) -> list[str]:
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]
    # return path if start is goal
    if start == goal:
        return [start]
    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path
            # mark node as explored
            explored.add(node)
    # in case there's no path between the 2 nodes
    return []
def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(start)
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, '''G''', '''D''')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, '''G''', '''D''')) # returns 4
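    # Hedged consistency check: a shortest path with k edges has distance k,
    # so the two functions above should agree on the demo graph.
    path = bfs_shortest_path(demo_graph, "G", "D")
    assert len(path) - 1 == bfs_shortest_path_distance(demo_graph, "G", "D")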
| 677 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
    from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class TFMT5ModelIntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="tf").input_ids
        labels = tokenizer("Hi I am", return_tensors="tf").input_ids

        loss = model(input_ids, labels=labels).loss
        mtf_score = -tf.math.reduce_mean(loss).numpy()

        EXPECTED_SCORE = -21.228168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2e-4)
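        # Hedged aside (illustrative, not part of the original test): the mean
        # per-token negative log-likelihood above maps to perplexity via exp.
        perplexity = tf.exp(tf.math.reduce_mean(loss))
        self.assertGreater(float(perplexity), 1.0)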
| 418 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = "cuda" if torch.cuda.is_available() else "cpu"
def split_text(text: str, n=100, character=" ") -> List[str]:
    """Split the text every ``n``-th occurrence of ``character``"""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]
def split_documents(documents: dict) -> dict:
    """Split documents into passages"""
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}
def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    """Compute the DPR embeddings of document passages"""
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt"
    )["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def main(
    rag_example_args: "RagExampleArguments",
    processing_args: "ProcessingArguments",
    index_hnsw_args: "IndexHnswArguments",
):
    ######################################
    logger.info("Step 1 - Create the dataset")
    ######################################
    # The dataset needed for RAG must have three columns:
    # - title (string): title of the document
    # - text (string): text of a passage of the document
    # - embeddings (array of dimension d): DPR representation of the passage

    # Let's say you have documents in tab-separated csv files with columns "title" and "text"
    assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"

    # You can load a Dataset object this way
    dataset = load_dataset(
        "csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"]
    )
    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files

    # Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)

    # And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}
    )  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer),
        batched=True,
        batch_size=processing_args.batch_size,
        features=new_features,
    )

    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset")
    dataset.save_to_disk(passages_path)
    # from datasets import load_from_disk
    # dataset = load_from_disk(passages_path)  # to reload the dataset

    ######################################
    logger.info("Step 2 - Index the dataset")
    ######################################

    # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index("embeddings", custom_index=index)

    # And save the index
    index_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss")
    dataset.get_index("embeddings").save(index_path)
    # dataset.load_faiss_index("embeddings", index_path)  # to reload the index
@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"),
        metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"},
    )
    question: Optional[str] = field(
        default=None,
        metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."},
    )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq",
        metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"},
    )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base",
        metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        },
    )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"),
        metadata={"help": "Path to a directory where the dataset passages and the index will be saved"},
    )


@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        },
    )
    batch_size: int = field(
        default=16,
        metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        },
    )


@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768,
        metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."},
    )
    m: int = field(
        default=128,
        metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        },
    )


if __name__ == "__main__":
    logging.basicConfig(level=logging.WARNING)
    logger.setLevel(logging.INFO)

    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
        main(rag_example_args, processing_args, index_hnsw_args)
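        # Step 3 (hedged sketch): query the index that was just built. The DPR
        # question-encoder checkpoint is an illustrative assumption.
        from datasets import load_from_disk
        from transformers import DPRQuestionEncoder, DPRQuestionEncoderTokenizerFast

        dataset = load_from_disk(os.path.join(rag_example_args.output_dir, "my_knowledge_dataset"))
        dataset.load_faiss_index("embeddings", os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss"))
        q_encoder = DPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
        q_tokenizer = DPRQuestionEncoderTokenizerFast.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
        question = rag_example_args.question or "What does Moses' rod turn into ?"
        question_emb = q_encoder(**q_tokenizer(question, return_tensors="pt"))[0][0].detach().numpy()
        scores, retrieved_examples = dataset.get_nearest_examples("embeddings", question_emb, k=5)
        print("Top retrieved passages:", retrieved_examples["title"])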
| 677 | 0 |
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class ClapFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "is_longer"]

    def __init__(
        self, feature_size=64, sampling_rate=48_000, hop_length=480, max_length_s=10,
        fft_window_size=1024, padding_value=0.0, return_attention_mask=False,
        frequency_min: float = 0, frequency_max: float = 14_000, top_db: int = None,
        truncation: str = "fusion", padding: str = "repeatpad", **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, return_attention_mask=return_attention_mask, **kwargs)
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size, min_frequency=frequency_min, max_frequency=frequency_max, sampling_rate=sampling_rate, norm=None, mel_scale='htk', )
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size, min_frequency=frequency_min, max_frequency=frequency_max, sampling_rate=sampling_rate, norm='slaney', mel_scale='slaney', )
    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output
    def _np_extract_fbank_features(self, waveform: np.array, mel_filters: Optional[np.array] = None) -> np.ndarray:
        log_mel_spectrogram = spectrogram(
            waveform, window_function(self.fft_window_size, 'hann'), frame_length=self.fft_window_size, hop_length=self.hop_length, power=2.0, mel_filters=mel_filters, log_mel='dB', )
        return log_mel_spectrogram.T
    def _random_mel_fusion(self, mel, total_frames, chunk_frames):
        ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
        if len(ranges[1]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0])
        idx_middle = np.random.choice(ranges[1])
        idx_back = np.random.choice(ranges[2])

        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]

        mel = torch.tensor(mel[None, None, :])
        mel_shrink = torch.nn.functional.interpolate(
            mel, size=[chunk_frames, 64], mode='bilinear', align_corners=False)
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0)
        return mel_fusion
    def _get_input_mel(self, waveform: np.array, max_length, truncation, padding):
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform) - max_length
                idx = np.random.randint(0, overflow + 1)
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel], axis=0)
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames)
                    longer = True
            else:
                raise NotImplementedError(F'''data_truncating {truncation} not implemented''')
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat + 1))[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat))
                waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode='constant', constant_values=0)
            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
            else:
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]

        return input_mel, longer
    def __call__(
        self, raw_speech, truncation: str = None, padding: Optional[str] = None,
        max_length: Optional[int] = None, sampling_rate: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None, **kwargs,
    ) -> BatchFeature:
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    F'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
                    F''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
                    F''' was sampled with {self.sampling_rate} and not {sampling_rate}.''')
        else:
            logger.warning(
                'It is strongly recommended to pass the `sampling_rate` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.')

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(F'''Only mono-channel audio is supported for input to {self}''')
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float64)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float64)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech)]

        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
            for waveform in raw_speech
        ]

        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel)
            is_longer.append(longer)

        if truncation == "fusion" and sum(is_longer) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0, len(input_mel))
            is_longer[rand_idx] = True

        if isinstance(input_mel[0], List):
            input_mel = [np.asarray(feature, dtype=np.float64) for feature in input_mel]

        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]

        input_features = {'input_features': input_mel, 'is_longer': is_longer}
        input_features = BatchFeature(input_features)

        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors)

        return input_features
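
# Hedged usage sketch (assumes the constructor defaults restored above): feed a
# one-second mono waveform and inspect the fused mel features.
def _clap_demo():
    extractor = ClapFeatureExtractor()
    waveform = np.zeros(48_000, dtype=np.float64)  # 1 s of silence at 48 kHz
    features = extractor(waveform, sampling_rate=48_000, return_tensors="np")
    print(features["input_features"].shape, features["is_longer"])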
| 120 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
'''configuration_longt5''': ['''LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LongT5Config''', '''LongT5OnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_longt5"] = [
'''LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LongT5EncoderModel''',
'''LongT5ForConditionalGeneration''',
'''LongT5Model''',
'''LongT5PreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_longt5"] = [
'''FlaxLongT5ForConditionalGeneration''',
'''FlaxLongT5Model''',
'''FlaxLongT5PreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
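
# Roughly how `_LazyModule` defers the heavy imports declared above: nothing is
# imported until an attribute is first accessed. A simplified, assumption-laden
# analogue using stdlib modules (not the real implementation):
#
#     import importlib
#
#     class LazyNamespace:
#         def __init__(self, import_structure):
#             # invert {module: [names]} into {name: module}
#             self._name_to_module = {
#                 name: module for module, names in import_structure.items() for name in names
#             }
#
#         def __getattr__(self, name):
#             module = importlib.import_module(self._name_to_module[name])
#             return getattr(module, name)
#
#     ns = LazyNamespace({"math": ["sqrt"], "json": ["dumps"]})
#     ns.sqrt(2.0)  # `math` is imported only at this point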
| 677 | 0 |
class FlowNetwork:
    def __init__(self, graph, sources, sinks):
        self.source_index = None
        self.sink_index = None
        self.graph = graph

        self._normalize_graph(sources, sinks)
        self.vertices_count = len(graph)
        self.maximum_flow_algorithm = None

    # make only one source and one sink
    def _normalize_graph(self, sources, sinks):
        if isinstance(sources, int):
            sources = [sources]
        if isinstance(sinks, int):
            sinks = [sinks]

        if len(sources) == 0 or len(sinks) == 0:
            return

        self.source_index = sources[0]
        self.sink_index = sinks[0]

        # make fake vertex if there are more
        # than one source or sink
        if len(sources) > 1 or len(sinks) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i])

            size = len(self.graph) + 1
            for room in self.graph:
                room.insert(0, 0)
            self.graph.insert(0, [0] * size)
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0

            size = len(self.graph) + 1
            for room in self.graph:
                room.append(0)
            self.graph.append([0] * size)
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1

    def find_maximum_flow(self):
        if self.maximum_flow_algorithm is None:
            raise Exception('You need to set maximum flow algorithm before.')
        if self.source_index is None or self.sink_index is None:
            return 0

        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.get_maximum_flow()

    def set_maximum_flow_algorithm(self, algorithm):
        self.maximum_flow_algorithm = algorithm(self)
class FlowNetworkAlgorithmExecutor:
    def __init__(self, flow_network):
        self.flow_network = flow_network
        self.vertices_count = flow_network.vertices_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False

    def execute(self):
        if not self.executed:
            self._algorithm()
            self.executed = True

    # You should override this method in a subclass
    def _algorithm(self):
        pass
class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)
        # use this to save your result
        self.maximum_flow = -1

    def get_maximum_flow(self):
        if not self.executed:
            raise Exception('You should execute algorithm before using its result!')
        return self.maximum_flow
class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)

        self.preflow = [[0] * self.vertices_count for i in range(self.vertices_count)]
        self.heights = [0] * self.vertices_count
        self.excesses = [0] * self.vertices_count

    def _algorithm(self):
        self.heights[self.source_index] = self.vertices_count

        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth

        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.vertices_count)
            if i != self.source_index and i != self.sink_index
        ]

        # move through list
        i = 0
        while i < len(vertices_list):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index)
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0, vertices_list.pop(i))
                i = 0
            else:
                i += 1

        self.maximum_flow = sum(self.preflow[self.source_index])
    def process_vertex(self, vertex_index):
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.vertices_count):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index, neighbour_index)

            self.relabel(vertex_index)

    def push(self, from_index, to_index):
        preflow_delta = min(
            self.excesses[from_index],
            self.graph[from_index][to_index] - self.preflow[from_index][to_index],
        )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta

    def relabel(self, vertex_index):
        min_height = None
        for to_index in range(self.vertices_count):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]

        if min_height is not None:
            self.heights[vertex_index] = min_height + 1
if __name__ == "__main__":
    entrances = [0]
    exits = [3]
    # graph = [
    #     [0, 0, 4, 6, 0, 0],
    #     [0, 0, 5, 2, 0, 0],
    #     [0, 0, 0, 0, 4, 4],
    #     [0, 0, 0, 0, 6, 6],
    #     [0, 0, 0, 0, 0, 0],
    #     [0, 0, 0, 0, 0, 0],
    # ]
    graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]

    # prepare our network
    flow_network = FlowNetwork(graph, entrances, exits)
    # set algorithm
    flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
    # and calculate
    maximum_flow = flow_network.find_maximum_flow()

    print(f"maximum flow is {maximum_flow}")
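
    # Hedged extra check: a single edge of capacity 5 from source 0 to sink 1
    # should carry a maximum flow of exactly 5.
    tiny_network = FlowNetwork([[0, 5], [0, 0]], [0], [1])
    tiny_network.set_maximum_flow_algorithm(PushRelabelExecutor)
    assert tiny_network.find_maximum_flow() == 5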
| 0 |
"""simple docstring"""
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class Conversation:
    def __init__(
        self, text: str = None, conversation_id: uuid.UUID = None, past_user_inputs=None, generated_responses=None
    ):
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []

        self.uuid: uuid.UUID = conversation_id
        self.past_user_inputs: List[str] = past_user_inputs
        self.generated_responses: List[str] = generated_responses
        self.new_user_input: Optional[str] = text

    def __eq__(self, other):
        if not isinstance(other, Conversation):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def add_user_input(self, text: str, overwrite: bool = False):
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f"""User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten """
                    f"""with: \"{text}\".""" )
                self.new_user_input = text
            else:
                logger.warning(
                    f"""User input added while unprocessed input was existing: \"{self.new_user_input}\" new input """
                    f"""ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input""" )
        else:
            self.new_user_input = text

    def mark_processed(self):
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response: str):
        self.generated_responses.append(response)

    def iter_texts(self):
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__(self):
        output = f"""Conversation id: {self.uuid} \n"""
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
            output += f"""{name} >> {text} \n"""
        return output
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        min_length_for_response (`int`, *optional*, defaults to 32):
            The minimum length (in number of tokens) for a response.
        minimum_tokens (`int`, *optional*, defaults to 10):
            The minimum length of tokens to leave for a response.
    """,
)
class ConversationalPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

    def _sanitize_parameters(
        self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs
    ):
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}

        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens

        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, conversations: Union[Conversation, List[Conversation]], num_workers=0, **kwargs):
        outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs

    def preprocess(self, conversation: Conversation, min_length_for_response=32):
        if not isinstance(conversation, Conversation):
            raise ValueError('ConversationalPipeline, expects Conversation as inputs')
        if conversation.new_user_input is None:
            raise ValueError(
                f"""Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. """
                '''Add user inputs with the conversation\'s `add_user_input` method''' )
        if hasattr(self.tokenizer, '_build_conversation_input_ids'):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)

        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}

    def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        max_length = generate_kwargs.get('max_length', self.model.config.max_length)

        n = model_inputs["input_ids"].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f"""Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})""")
            trim = max_length - minimum_tokens
            model_inputs["input_ids"] = model_inputs["input_ids"][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs["attention_mask"] = model_inputs["attention_mask"][:, -trim:]
        conversation = model_inputs.pop("conversation")
        generate_kwargs["max_length"] = max_length
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}

    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        output_ids = model_outputs["output_ids"]
        answer = self.tokenizer.decode(
            output_ids[0], skip_special_tokens=True, clean_up_tokenization_spaces=clean_up_tokenization_spaces, )
        conversation = model_outputs["conversation"]
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation

    def _legacy_parse_and_tokenize(self, conversation: Conversation) -> List[int]:
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))

        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
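
# Hedged usage sketch of the pipeline above; the checkpoint is illustrative.
def _conversational_demo():
    from transformers import pipeline

    chatbot = pipeline("conversational", model="microsoft/DialoGPT-small")
    conversation = Conversation("What's the best way to learn Python?")
    conversation = chatbot(conversation)
    print(conversation.generated_responses[-1])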
| 677 | 0 |
import warnings
from .generation import TFGenerationMixin
class TFGenerationMixin(TFGenerationMixin):
    # warning at import time
    warnings.warn(
        '''Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will '''
        '''be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.''',
        FutureWarning,
    )
| 424 |
"""simple docstring"""
def solution(n: int = 100) -> int:
    """Return the number of distinct terms a**b for 2 <= a <= n and 2 <= b <= n."""
    collect_powers = set()
    upper_limit = n + 1  # maximum limit
    for a in range(2, upper_limit):
        for b in range(2, upper_limit):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow)  # adds the result to the set
    return len(collect_powers)
if __name__ == "__main__":
print('''Number of terms ''', solution(int(str(input()).strip())))
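    # Known small case from the problem statement: for 2 <= a <= 5 and
    # 2 <= b <= 5 there are 15 distinct terms.
    assert solution(5) == 15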
| 677 | 0 |
'''simple docstring'''
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def onnx_export(
    model, model_args: tuple, output_path: Path, ordered_input_names, output_names,
    dynamic_axes, opset, use_external_data_format=False,
):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model, model_args, f=output_path.as_posix(), input_names=ordered_input_names,
            output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True,
            use_external_data_format=use_external_data_format, enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model, model_args, f=output_path.as_posix(), input_names=ordered_input_names,
            output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True,
            opset_version=opset,
        )
@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    pipeline = StableDiffusionPipeline.from_pretrained(model_path, torch_dtype=dtype).to(device)
    output_path = Path(output_path)

    # TEXT ENCODER
    num_tokens = pipeline.text_encoder.config.max_position_embeddings
    text_hidden_size = pipeline.text_encoder.config.hidden_size
    text_input = pipeline.tokenizer(
        "A sample prompt", padding="max_length", max_length=pipeline.tokenizer.model_max_length, truncation=True, return_tensors="pt", )
    onnx_export(
        pipeline.text_encoder, model_args=(text_input.input_ids.to(device=device, dtype=torch.int32)), output_path=output_path / "text_encoder" / "model.onnx", ordered_input_names=["input_ids"], output_names=["last_hidden_state", "pooler_output"], dynamic_axes={
            "input_ids": {0: "batch", 1: "sequence"},
        }, opset=opset, )
    del pipeline.text_encoder
    # UNET
    unet_in_channels = pipeline.unet.config.in_channels
    unet_sample_size = pipeline.unet.config.sample_size
    unet_path = output_path / "unet" / "model.onnx"
    onnx_export(
        pipeline.unet, model_args=(
            torch.randn(2, unet_in_channels, unet_sample_size, unet_sample_size).to(device=device, dtype=dtype),
            torch.randn(2).to(device=device, dtype=dtype),
            torch.randn(2, num_tokens, text_hidden_size).to(device=device, dtype=dtype),
            False,
        ), output_path=unet_path, ordered_input_names=["sample", "timestep", "encoder_hidden_states", "return_dict"], output_names=["out_sample"], dynamic_axes={
            "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
            "timestep": {0: "batch"},
            "encoder_hidden_states": {0: "batch", 1: "sequence"},
        }, opset=opset, use_external_data_format=True, )
    unet_model_path = str(unet_path.absolute().as_posix())
    unet_dir = os.path.dirname(unet_model_path)
    unet = onnx.load(unet_model_path)
    # clean up existing tensor files
    shutil.rmtree(unet_dir)
    os.mkdir(unet_dir)
    # collate external tensor files into one
    onnx.save_model(
        unet, unet_model_path, save_as_external_data=True, all_tensors_to_one_file=True, location="weights.pb", convert_attribute=False, )
    del pipeline.unet
    # VAE ENCODER
    vae_encoder = pipeline.vae
    vae_in_channels = vae_encoder.config.in_channels
    vae_sample_size = vae_encoder.config.sample_size
    # need to get the raw tensor output (sample) from the encoder
    vae_encoder.forward = lambda sample, return_dict: vae_encoder.encode(sample, return_dict)[0].sample()
    onnx_export(
        vae_encoder, model_args=(
            torch.randn(1, vae_in_channels, vae_sample_size, vae_sample_size).to(device=device, dtype=dtype),
            False,
        ), output_path=output_path / "vae_encoder" / "model.onnx", ordered_input_names=["sample", "return_dict"], output_names=["latent_sample"], dynamic_axes={
            "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        }, opset=opset, )
    # VAE DECODER
    vae_decoder = pipeline.vae
    vae_latent_channels = vae_decoder.config.latent_channels
    vae_out_channels = vae_decoder.config.out_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_encoder.decode
    onnx_export(
        vae_decoder, model_args=(
            torch.randn(1, vae_latent_channels, unet_sample_size, unet_sample_size).to(device=device, dtype=dtype),
            False,
        ), output_path=output_path / "vae_decoder" / "model.onnx", ordered_input_names=["latent_sample", "return_dict"], output_names=["sample"], dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        }, opset=opset, )
    del pipeline.vae
    # SAFETY CHECKER
    if pipeline.safety_checker is not None:
        safety_checker = pipeline.safety_checker
        clip_num_channels = safety_checker.config.vision_config.num_channels
        clip_image_size = safety_checker.config.vision_config.image_size
        safety_checker.forward = safety_checker.forward_onnx
        onnx_export(
            pipeline.safety_checker, model_args=(
                torch.randn(
                    1, clip_num_channels, clip_image_size, clip_image_size, ).to(device=device, dtype=dtype),
                torch.randn(1, clip_image_size, clip_image_size, clip_num_channels).to(device=device, dtype=dtype),
            ), output_path=output_path / "safety_checker" / "model.onnx", ordered_input_names=["clip_input", "images"], output_names=["out_images", "has_nsfw_concepts"], dynamic_axes={
                "clip_input": {0: "batch", 1: "channels", 2: "height", 3: "width"},
                "images": {0: "batch", 1: "height", 2: "width", 3: "channels"},
            }, opset=opset, )
        del pipeline.safety_checker
        safety_checker = OnnxRuntimeModel.from_pretrained(output_path / "safety_checker")
        feature_extractor = pipeline.feature_extractor
    else:
        safety_checker = None
        feature_extractor = None

    onnx_pipeline = OnnxStableDiffusionPipeline(
        vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_encoder"), vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_decoder"), text_encoder=OnnxRuntimeModel.from_pretrained(output_path / "text_encoder"), tokenizer=pipeline.tokenizer, unet=OnnxRuntimeModel.from_pretrained(output_path / "unet"), scheduler=pipeline.scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, requires_safety_checker=safety_checker is not None, )

    onnx_pipeline.save_pretrained(output_path)
    print("ONNX pipeline saved to", output_path)

    del pipeline
    del onnx_pipeline
    _ = OnnxStableDiffusionPipeline.from_pretrained(output_path, provider="CPUExecutionProvider")
    print("ONNX pipeline is loadable")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_path",
type=str,
required=True,
help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
)
parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--opset",
default=14,
type=int,
help="The version of the ONNX operator set to use.",
)
parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
    args = parser.parse_args()
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
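
    # Hedged usage sketch (illustrative prompt, not part of the conversion):
    #     pipe = OnnxStableDiffusionPipeline.from_pretrained(args.output_path, provider="CPUExecutionProvider")
    #     image = pipe("a photo of an astronaut riding a horse").images[0]
    #     image.save("astronaut.png")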
| 400 |
"""simple docstring"""
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/xprophetnet-large-wiki100-cased": (
        "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"
    ),
}
class XLMProphetNetConfig(PretrainedConfig):
    model_type = "xlm-prophetnet"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "num_encoder_attention_heads",
    }
    def __init__(
        self,
        activation_dropout=0.1, activation_function="gelu", vocab_size=30_522, hidden_size=1024,
        encoder_ffn_dim=4096, num_encoder_layers=12, num_encoder_attention_heads=16,
        decoder_ffn_dim=4096, num_decoder_layers=12, num_decoder_attention_heads=16,
        attention_dropout=0.1, dropout=0.1, max_position_embeddings=512, init_std=0.02,
        is_encoder_decoder=True, add_cross_attention=True, decoder_start_token_id=0,
        ngram=2, num_buckets=32, relative_max_distance=128, disable_ngram_loss=False,
        eps=0.0, use_cache=True, pad_token_id=0, bos_token_id=1, eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_ffn_dim = encoder_ffn_dim
        self.num_encoder_layers = num_encoder_layers
        self.num_encoder_attention_heads = num_encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_decoder_layers = num_decoder_layers
        self.num_decoder_attention_heads = num_decoder_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.init_std = init_std  # Normal(0, this parameter)
        self.activation_function = activation_function

        # parameters for xlmprophetnet
        self.ngram = ngram
        self.num_buckets = num_buckets
        self.relative_max_distance = relative_max_distance
        self.disable_ngram_loss = disable_ngram_loss
        self.eps = eps

        # 3 Types of Dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.dropout = dropout

        self.use_cache = use_cache

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, add_cross_attention=add_cross_attention, decoder_start_token_id=decoder_start_token_id, **kwargs, )
    @property
    def num_hidden_layers(self) -> int:
        return self.num_encoder_layers + self.num_decoder_layers

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            '''This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and'''
            ''' `num_decoder_layers`.''' )
| 677 | 0 |
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class MetricTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"] )

        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics

    @require_cpu
    def test_metric_cpu_noop(self):
        debug_launcher(self.test_metrics.main, num_processes=1)

    @require_cpu
    def test_metric_cpu_multi(self):
        debug_launcher(self.test_metrics.main)

    @require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()

    @require_multi_gpu
    def test_metric_gpu_multi(self):
        print(f'''Found {torch.cuda.device_count()} devices.''')
        cmd = ['''torchrun''', f'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
| 263 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_bert''': ['''BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BertConfig''', '''BertOnnxConfig'''],
'''tokenization_bert''': ['''BasicTokenizer''', '''BertTokenizer''', '''WordpieceTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bert_fast"] = ['''BertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bert"] = [
'''BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BertForMaskedLM''',
'''BertForMultipleChoice''',
'''BertForNextSentencePrediction''',
'''BertForPreTraining''',
'''BertForQuestionAnswering''',
'''BertForSequenceClassification''',
'''BertForTokenClassification''',
'''BertLayer''',
'''BertLMHeadModel''',
'''BertModel''',
'''BertPreTrainedModel''',
'''load_tf_weights_in_bert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_bert"] = [
'''TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFBertEmbeddings''',
'''TFBertForMaskedLM''',
'''TFBertForMultipleChoice''',
'''TFBertForNextSentencePrediction''',
'''TFBertForPreTraining''',
'''TFBertForQuestionAnswering''',
'''TFBertForSequenceClassification''',
'''TFBertForTokenClassification''',
'''TFBertLMHeadModel''',
'''TFBertMainLayer''',
'''TFBertModel''',
'''TFBertPreTrainedModel''',
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bert_tf"] = ['''TFBertTokenizer''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_bert"] = [
'''FlaxBertForCausalLM''',
'''FlaxBertForMaskedLM''',
'''FlaxBertForMultipleChoice''',
'''FlaxBertForNextSentencePrediction''',
'''FlaxBertForPreTraining''',
'''FlaxBertForQuestionAnswering''',
'''FlaxBertForSequenceClassification''',
'''FlaxBertForTokenClassification''',
'''FlaxBertModel''',
'''FlaxBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 677 | 0 |
"""simple docstring"""
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpt2 import GPT2Tokenizer
class TFGPT2Tokenizer(tf.keras.layers.Layer):
    def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)

    @classmethod
    def from_tokenizer(cls, tokenizer: GPT2Tokenizer, *args, **kwargs):
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *init_inputs, **kwargs):
        tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)

    @classmethod
    def from_config(cls, config):
        return cls(**config)

    def get_config(self):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call(self, x, max_length: int = None):
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)

        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length

            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id)
        return {"attention_mask": attention_mask, "input_ids": input_ids}
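
# Hedged usage sketch: the layer tokenizes inside the TF graph, so a saved
# model can accept raw strings end-to-end. The "gpt2" checkpoint is illustrative.
def _tf_tokenizer_demo():
    hf_tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
    tokenizer_layer = TFGPT2Tokenizer.from_tokenizer(
        hf_tokenizer, max_length=16, pad_token_id=hf_tokenizer.eos_token_id
    )
    outputs = tokenizer_layer(tf.constant(["hello world"]))
    print(outputs["input_ids"].shape)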
| 19 |
"""simple docstring"""
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __magic_name__ ( __UpperCAmelCase ):
__A : Tuple = ["image_processor", "tokenizer"]
__A : Dict = "BlipImageProcessor"
__A : Dict = "AutoTokenizer"
def __init__( self : Any , snake_case__ : Union[str, Any] , snake_case__ : str ):
'''simple docstring'''
lowercase :Dict = False
super().__init__(snake_case__ , snake_case__ )
lowercase :Union[str, Any] = self.image_processor
    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor
    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
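
# Minimal usage sketch; the checkpoint id and image path below are illustrative
# assumptions, not part of this module.
if __name__ == "__main__":
    from PIL import Image

    demo_processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
    demo_inputs = demo_processor(images=Image.open("photo.jpg"), text="a photo of", return_tensors="pt")
    print(demo_inputs.keys())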
| 677 | 0 |
"""simple docstring"""
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/maskformer-swin-base-ade": (
"https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
logger = logging.get_logger(__name__)
class MaskFormerConfig(PretrainedConfig):
    model_type = "maskformer"
    attribute_map = {"hidden_size": "mask_feature_size"}
    backbones_supported = ["resnet", "swin"]
    decoders_supported = ["detr"]

    def __init__(self, fpn_feature_size: int = 256, mask_feature_size: int = 256, no_object_weight: float = 0.1, use_auxiliary_loss: bool = False, backbone_config: Optional[Dict] = None, decoder_config: Optional[Dict] = None, init_std: float = 0.02, init_xavier_std: float = 1.0, dice_weight: float = 1.0, cross_entropy_weight: float = 1.0, mask_weight: float = 20.0, output_auxiliary_logits: Optional[bool] = None, **kwargs):
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384, in_channels=3, patch_size=4, embed_dim=128, depths=[2, 2, 18, 2], num_heads=[4, 8, 16, 32], window_size=12, drop_path_rate=0.3, out_features=["stage1", "stage2", "stage3", "stage4"],
            )

        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. "
                f"Supported model types: {','.join(self.backbones_supported)}")
        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("model_type") if isinstance(decoder_config, dict) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f"Transformer Decoder {decoder_type} not supported, please use one of"
                    f" {','.join(self.decoders_supported)}")
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)
        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)
    @classmethod
    def from_backbone_and_decoder_configs(cls, backbone_config, decoder_config, **kwargs):
        return cls(
            backbone_config=backbone_config, decoder_config=decoder_config, **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
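
# Minimal construction sketch: with no arguments the config falls back to the Swin
# backbone and DETR decoder defaults above, so a bare instance is enough to inspect.
if __name__ == "__main__":
    demo_config = MaskFormerConfig()
    print(demo_config.backbone_config.model_type, demo_config.decoder_config.model_type)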
| 586 |
"""simple docstring"""
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class OfflineTests(TestCasePlus):
@require_torch
    def test_offline_mode(self):
        load = '''
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
'''

        run = '''
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
'''

        mock = '''
import socket
def offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn't access internet")
socket.socket = offline_socket
'''

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
@require_torch
    def test_offline_mode_no_internet(self):
        load = '''
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
'''

        run = '''
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
'''

        mock = '''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")
socket.socket = offline_socket
'''

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
@require_torch
    def test_offline_mode_sharded_checkpoint(self):
        load = '''
from transformers import BertConfig, BertModel, BertTokenizer
'''

        run = '''
mname = "hf-internal-testing/tiny-random-bert-sharded"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print("success")
'''

        mock = '''
import socket
def offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")
socket.socket = offline_socket
'''

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # next emulate no network
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]

        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
@require_torch
    def test_offline_mode_pipeline_exception(self):
        load = '''
from transformers import pipeline
'''
        run = '''
mname = "hf-internal-testing/tiny-random-bert"
pipe = pipeline(model=mname)
'''

        mock = '''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")
socket.socket = offline_socket
'''
        env = self.get_env()
        env["TRANSFORMERS_OFFLINE"] = "1"
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 1, result.stderr)
        self.assertIn(
            "You cannot infer task automatically within `pipeline` when using offline mode",
            result.stderr.decode().replace("\n", ""),
        )
@require_torch
    def test_offline_model_dynamic_model(self):
        load = '''
from transformers import AutoModel
'''
        run = '''
mname = "hf-internal-testing/test_dynamic_model"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print("success")
'''

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
| 677 | 0 |
import os
def solution():
    """Return the first ten digits of the sum of the numbers in num.txt."""
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]
if __name__ == "__main__":
print(solution())
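    # Assuming num.txt holds the one hundred 50-digit numbers from Project Euler
    # problem 13, solution() returns "5537376230".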
| 204 |
"""simple docstring"""
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)
    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        # record only leaf modules (no submodules) plus conv/batch-norm layers
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)
    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self
    @property
    def parametrized(self):
        # keep only traced modules that actually hold learnable parameters
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)
    def __call__(self, x: Tensor):
        """Transfer the weights of ``src`` to ``dest`` by matching traced, parametrized leaf modules."""
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced):
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}.")

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transferred from={src_m} to={dest_m}")
def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."

    checkpoint_name = f"resnet{'-'.join(name.split('resnet'))}"
    print(checkpoint_name)

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name, commit_message="Add model", use_temp_dir=True,
        )
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name, commit_message="Add image processor", use_temp_dir=True,
        )
        print(f"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
'''resnet18''': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type='''basic'''),
'''resnet26''': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck'''),
'''resnet34''': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type='''basic'''),
'''resnet50''': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck'''),
'''resnet101''': ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck'''),
'''resnet152''': ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck'''),
}
    if model_name:
        convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help=(
            '''The name of the model you wish to convert, it must be one of the supported resnet* architectures,'''
            ''' currently: resnet18,26,34,50,101,152. If `None`, all of them will be converted.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=Path,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
default=True,
type=bool,
required=False,
help='''If True, push model and image processor to the hub.''',
)
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
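    # Example invocation (the script filename is an illustrative assumption):
    #   python convert_resnet_timm_to_pytorch.py --model_name resnet50 \
    #       --pytorch_dump_folder_path ./converted-resnet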
| 677 | 0 |
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict:
    new_olid = olid.strip().strip("/")  # Remove leading/trailing whitespace & slashes
    if new_olid.count("/") != 1:
        msg = f"{olid} is not a valid Open Library olid"
        raise ValueError(msg)
    return requests.get(f"https://openlibrary.org/{new_olid}.json").json()
def summarize_book(ol_book_data: dict) -> dict:
    desired_keys = {
        "title": "Title",
        "publish_date": "Publish date",
        "authors": "Authors",
        "number_of_pages": "Number of pages:",
        "first_sentence": "First sentence",
        "isbn_10": "ISBN (10)",
        "isbn_13": "ISBN (13)",
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data["Authors"] = [
        get_openlibrary_data(author["key"])["name"] for author in data["Authors"]
    ]
    data["First sentence"] = data["First sentence"]["value"]
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = ", ".join(value)
    return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
        isbn = input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(f'''Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.''')
continue
print(f'''\nSearching Open Library for ISBN: {isbn}...\n''')
try:
            book_summary = summarize_book(get_openlibrary_data(f'''isbn/{isbn}'''))
print("\n".join(f'''{key}: {value}''' for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
            print(f'''Sorry, there are no results for ISBN: {isbn}.''')
| 45 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class TFMobileBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": TFMobileBertModel,
"fill-mask": TFMobileBertForMaskedLM,
"question-answering": TFMobileBertForQuestionAnswering,
"text-classification": TFMobileBertForSequenceClassification,
"token-classification": TFMobileBertForTokenClassification,
"zero-shot": TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)

        return inputs_dict
class TFMobileBertModelTester(object):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, embedding_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.embedding_size = embedding_size
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = MobileBertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, embedding_size=self.embedding_size, )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_mobilebert_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFMobileBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mobilebert_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFMobileBertForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_mobilebert_for_next_sequence_prediction(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFMobileBertForNextSentencePrediction(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFMobileBertForPreTraining(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(
            result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFMobileBertForSequenceClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mobilebert_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFMobileBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_mobilebert_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFMobileBertForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_mobilebert_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFMobileBertForQuestionAnswering(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
    def setUp(self):
        self.model_tester = TFMobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["google/mobilebert-uncased"]:
            model = TFMobileBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFMobileBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 30522]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ])
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
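
# Minimal standalone inference sketch mirroring the integration test above
# (same public checkpoint; run only when the weights are available):
if __name__ == "__main__":
    demo_model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
    demo_logits = demo_model(tf.constant([[0, 1, 2, 3, 4, 5]]))[0]
    print(demo_logits.shape)  # expected: (1, 6, 30522)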
| 677 | 0 |
"""simple docstring"""
def check_bouncy(n: int) -> bool:
    """Return True if ``n`` is bouncy, i.e. its digits are neither increasing nor decreasing."""
    if not isinstance(n, int):
        raise ValueError("check_bouncy() accepts only integer arguments")
    str_n = str(n)
    sorted_str_n = "".join(sorted(str_n))
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
def solution(percent: float = 99) -> int:
    """Return the least number for which the proportion of bouncy numbers reaches ``percent``."""
    if not 0 < percent < 100:
        raise ValueError("solution() only accepts values from 0 to 100")
    bouncy_num = 0
    num = 1

    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f"""{solution(99)}""") | 552 |
"""simple docstring"""
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def set_seed(seed: int):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
# ^^ safe to call this function even if cuda is not available
class EMAModel:
    """Exponential Moving Average of model parameters."""

    def __init__(self, parameters: Iterable[torch.nn.Parameter], decay: float = 0.9999, min_decay: float = 0.0, update_after_step: int = 0, use_ema_warmup: bool = False, inv_gamma: Union[float, int] = 1.0, power: Union[float, int] = 2 / 3, model_cls: Optional[Any] = None, model_config: Dict[str, Any] = None, **kwargs):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage`", "1.0.0", deprecation_message, standard_warn=False, )
            parameters = parameters.parameters()

            # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
            use_ema_warmup = True

        if kwargs.get("max_value", None) is not None:
            deprecation_message = "The `max_value` argument is deprecated. Please use `decay` instead."
            deprecate("max_value", "1.0.0", deprecation_message, standard_warn=False)
            decay = kwargs["max_value"]

        if kwargs.get("min_value", None) is not None:
            deprecation_message = "The `min_value` argument is deprecated. Please use `min_decay` instead."
            deprecate("min_value", "1.0.0", deprecation_message, standard_warn=False)
            min_decay = kwargs["min_value"]

        parameters = list(parameters)
        self.shadow_params = [p.clone().detach() for p in parameters]

        if kwargs.get("device", None) is not None:
            deprecation_message = "The `device` argument is deprecated. Please use `to` instead."
            deprecate("device", "1.0.0", deprecation_message, standard_warn=False)
            self.to(device=kwargs["device"])

        self.temp_stored_params = None
        self.decay = decay
        self.min_decay = min_decay
        self.update_after_step = update_after_step
        self.use_ema_warmup = use_ema_warmup
        self.inv_gamma = inv_gamma
        self.power = power
        self.optimization_step = 0
        self.cur_decay_value = None  # set in `step()`
        self.model_cls = model_cls
        self.model_config = model_config
    @classmethod
    def from_pretrained(cls, path, model_cls) -> "EMAModel":
        _, ema_kwargs = model_cls.load_config(path, return_unused_kwargs=True)
        model = model_cls.from_pretrained(path)

        ema_model = cls(model.parameters(), model_cls=model_cls, model_config=model.config)

        ema_model.load_state_dict(ema_kwargs)
        return ema_model
    def save_pretrained(self, path):
        if self.model_cls is None:
            raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__.")

        if self.model_config is None:
            raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__.")

        model = self.model_cls.from_config(self.model_config)
        state_dict = self.state_dict()
        state_dict.pop("shadow_params", None)

        model.register_to_config(**state_dict)
        self.copy_to(model.parameters())
        model.save_pretrained(path)
    def get_decay(self, optimization_step: int) -> float:
        """Compute the decay factor for the exponential moving average."""
        step = max(0, optimization_step - self.update_after_step - 1)

        if step <= 0:
            return 0.0

        if self.use_ema_warmup:
            cur_decay_value = 1 - (1 + step / self.inv_gamma) ** -self.power
        else:
            cur_decay_value = (1 + step) / (10 + step)

        cur_decay_value = min(cur_decay_value, self.decay)
        # make sure decay is not smaller than min_decay
        cur_decay_value = max(cur_decay_value, self.min_decay)
        return cur_decay_value
    @torch.no_grad()
    def step(self, parameters: Iterable[torch.nn.Parameter]):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage.step`", "1.0.0", deprecation_message, standard_warn=False, )
            parameters = parameters.parameters()

        parameters = list(parameters)

        self.optimization_step += 1

        # Compute the decay factor for the exponential moving average.
        decay = self.get_decay(self.optimization_step)
        self.cur_decay_value = decay
        one_minus_decay = 1 - decay

        context_manager = contextlib.nullcontext
        if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
            import deepspeed

        for s_param, param in zip(self.shadow_params, parameters):
            if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
                context_manager = deepspeed.zero.GatheredParameters(param, modifier_rank=None)

            with context_manager():
                if param.requires_grad:
                    s_param.sub_(one_minus_decay * (s_param - param))
                else:
                    s_param.copy_(param)
    def copy_to(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        parameters = list(parameters)
        for s_param, param in zip(self.shadow_params, parameters):
            param.data.copy_(s_param.to(param.device).data)

    def to(self, device=None, dtype=None) -> None:
        self.shadow_params = [
            p.to(device=device, dtype=dtype) if p.is_floating_point() else p.to(device=device)
            for p in self.shadow_params
        ]
    def state_dict(self) -> dict:
        return {
            "decay": self.decay,
            "min_decay": self.min_decay,
            "optimization_step": self.optimization_step,
            "update_after_step": self.update_after_step,
            "use_ema_warmup": self.use_ema_warmup,
            "inv_gamma": self.inv_gamma,
            "power": self.power,
            "shadow_params": self.shadow_params,
        }

    def store(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        self.temp_stored_params = [param.detach().cpu().clone() for param in parameters]

    def restore(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        if self.temp_stored_params is None:
            raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights " "to `restore()`")
        for c_param, param in zip(self.temp_stored_params, parameters):
            param.data.copy_(c_param.data)

        # Better memory-wise.
        self.temp_stored_params = None
    def load_state_dict(self, state_dict: dict) -> None:
        state_dict = copy.deepcopy(state_dict)

        self.decay = state_dict.get("decay", self.decay)
        if self.decay < 0.0 or self.decay > 1.0:
            raise ValueError("Decay must be between 0 and 1")

        self.min_decay = state_dict.get("min_decay", self.min_decay)
        if not isinstance(self.min_decay, float):
            raise ValueError("Invalid min_decay")

        self.optimization_step = state_dict.get("optimization_step", self.optimization_step)
        if not isinstance(self.optimization_step, int):
            raise ValueError("Invalid optimization_step")

        self.update_after_step = state_dict.get("update_after_step", self.update_after_step)
        if not isinstance(self.update_after_step, int):
            raise ValueError("Invalid update_after_step")

        self.use_ema_warmup = state_dict.get("use_ema_warmup", self.use_ema_warmup)
        if not isinstance(self.use_ema_warmup, bool):
            raise ValueError("Invalid use_ema_warmup")

        self.inv_gamma = state_dict.get("inv_gamma", self.inv_gamma)
        if not isinstance(self.inv_gamma, (float, int)):
            raise ValueError("Invalid inv_gamma")

        self.power = state_dict.get("power", self.power)
        if not isinstance(self.power, (float, int)):
            raise ValueError("Invalid power")

        shadow_params = state_dict.get("shadow_params", None)
        if shadow_params is not None:
            self.shadow_params = shadow_params
            if not isinstance(self.shadow_params, list):
                raise ValueError("shadow_params must be a list")
            if not all(isinstance(p, torch.Tensor) for p in self.shadow_params):
                raise ValueError("shadow_params must all be Tensors")
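
# Minimal training-loop sketch; the model, optimizer, and hyperparameters below are
# illustrative assumptions, not part of this module.
if __name__ == "__main__":
    net = torch.nn.Linear(4, 4)
    ema = EMAModel(net.parameters(), decay=0.999)
    opt = torch.optim.SGD(net.parameters(), lr=1e-2)
    for _ in range(3):
        loss = net(torch.randn(2, 4)).pow(2).mean()
        opt.zero_grad()
        loss.backward()
        opt.step()
        ema.step(net.parameters())  # update the shadow params after each optimizer step
    ema.copy_to(net.parameters())  # load the averaged weights, e.g. for evaluation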
| 677 | 0 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'
),
'distilbert-base-german-cased': 'https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt',
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'
),
'distilbert-base-german-cased': (
'https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'
),
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'distilbert-base-uncased': 5_12,
'distilbert-base-uncased-distilled-squad': 5_12,
'distilbert-base-cased': 5_12,
'distilbert-base-cased-distilled-squad': 5_12,
'distilbert-base-german-cased': 5_12,
'distilbert-base-multilingual-cased': 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
'distilbert-base-uncased': {'do_lower_case': True},
'distilbert-base-uncased-distilled-squad': {'do_lower_case': True},
'distilbert-base-cased': {'do_lower_case': False},
'distilbert-base-cased-distilled-squad': {'do_lower_case': False},
'distilbert-base-german-cased': {'do_lower_case': False},
'distilbert-base-multilingual-cased': {'do_lower_case': False},
}
class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
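
# Minimal usage sketch, assuming the "distilbert-base-uncased" checkpoint is available:
if __name__ == "__main__":
    demo_tokenizer = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
    print(demo_tokenizer("hello world")["input_ids"])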
| 418 |
"""simple docstring"""
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def t5x_relpos_bias_lookup(params, i, prefix):
    """Returns the Relative Position Bias parameters of a layer. Does not transpose."""
    return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]
def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :])
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
    return k, o, q, v
def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
        wi_1 = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]
    wo = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
    return wi, wo
def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]
def convert_t5x_to_pytorch(variables: dict, *, num_layers: int, is_encoder_only: bool, scalable_attention: bool = False):
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                old, i, "encoder").T

    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not scalable_attention:
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "encoder").T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "decoder").T
    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T
            if scalable_attention:
                # convert the rel_embedding of each layer
                new[f"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(old, i, "decoder").T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new
def make_state_dict(converted_params: dict, is_encoder_only: bool):
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict
def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention):
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention)
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
def convert_t5x_checkpoint_to_pytorch(t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False, scalable_attention: bool = False, ):
    config = MT5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMT5EncoderModel(config)
    else:
        model = UMT5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
)
parser.add_argument(
'''--scalable_attention''',
action='''store_true''',
help='''Whether the model uses scaled attention (umt5 model)''',
default=False,
)
    args = parser.parse_args()
    convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
| 677 | 0 |
'''simple docstring'''
import math
def proth( number ):
    '''simple docstring'''
    if not isinstance(number , int ):
        msg = f'''Input value of [number={number}] must be an integer'''
        raise TypeError(msg )
    if number < 1:
        msg = f'''Input value of [number={number}] must be > 0'''
        raise ValueError(msg )
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        # number of power-of-two "blocks" needed to reach the requested Proth number
        block_index = int(math.log(number // 3 , 2 ) ) + 2
        proth_list = [3, 5]
        proth_index = 2
        increment = 3
        for block in range(1 , block_index ):
            for _ in range(increment ):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1] )
                proth_index += 1
            increment *= 2
        return proth_list[number - 1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for number in range(11):
        value = 0
        try:
            value = proth(number)
except ValueError:
print(f'''ValueError: there is no {number}th Proth number''')
continue
print(f'''The {number}th Proth number: {value}''')
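    # Added cross-check (a sketch, not part of the original file): a Proth number has the
    # form k * 2**n + 1 with k odd and 2**n > k, so a brute-force test should reproduce
    # the sequence 3, 5, 9, 13, ... returned by proth() above.
    def is_proth(candidate: int) -> bool:
        n = 1
        while (1 << n) < candidate:
            power = 1 << n
            k, rem = divmod(candidate - 1, power)
            if rem == 0 and k % 2 == 1 and power > k:
                return True
            n += 1
        return False
    print([x for x in range(3, 100) if is_proth(x)])
    # -> [3, 5, 9, 13, 17, 25, 33, 41, 49, 57, 65, 81, 97]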
| 120 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_blenderbot''': [
'''BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlenderbotConfig''',
'''BlenderbotOnnxConfig''',
],
'''tokenization_blenderbot''': ['''BlenderbotTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_blenderbot_fast'''] = ['''BlenderbotTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_blenderbot'''] = [
'''BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlenderbotForCausalLM''',
'''BlenderbotForConditionalGeneration''',
'''BlenderbotModel''',
'''BlenderbotPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_blenderbot'''] = [
'''TFBlenderbotForConditionalGeneration''',
'''TFBlenderbotModel''',
'''TFBlenderbotPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_blenderbot'''] = [
'''FlaxBlenderbotForConditionalGeneration''',
'''FlaxBlenderbotModel''',
'''FlaxBlenderbotPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 677 | 0 |
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_orig_config_file( orig_cfg_file ):
    """simple docstring"""
    print('''Loading config file...''' )
    def flatten_yaml_as_dict(d, parent_key="", sep="." ):
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping ):
                items.extend(flatten_yaml_as_dict(v, new_key, sep=sep ).items() )
            else:
                items.append((new_key, v) )
        return dict(items )
    config = argparse.Namespace()
    with open(orig_cfg_file, '''r''' ) as yaml_file:
        try:
            cfg = yaml.load(yaml_file, Loader=yaml.FullLoader )
            flat_cfg = flatten_yaml_as_dict(cfg )
            for k, v in flat_cfg.items():
                setattr(config, k, v )
        except yaml.YAMLError as exc:
            logger.error('''Error while loading config file: {}. Error message: {}'''.format(orig_cfg_file, str(exc ) ) )
    return config
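# Illustrative example (added note, not in the original file): the flattener turns nested
# YAML dicts into dotted attribute names set on the Namespace, e.g.
#   {"model": {"classification": {"name": "mobilevit_v2"}}}
#   -> {"model.classification.name": "mobilevit_v2"}
# which is why the code below reads keys like "model.classification.name" via getattr.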
def get_mobilevitva_config( task_name, orig_cfg_file ):
    """simple docstring"""
    config = MobileViTVaConfig()
    is_segmentation_model = False
    # dataset
    if task_name.startswith('''imagenet1k_''' ):
        config.num_labels = 1_0_0_0
        if int(task_name.strip().split('''_''' )[-1] ) == 3_8_4:
            config.image_size = 3_8_4
        else:
            config.image_size = 2_5_6
        filename = '''imagenet-1k-id2label.json'''
    elif task_name.startswith('''imagenet21k_to_1k_''' ):
        config.num_labels = 2_1_0_0_0
        if int(task_name.strip().split('''_''' )[-1] ) == 3_8_4:
            config.image_size = 3_8_4
        else:
            config.image_size = 2_5_6
        filename = '''imagenet-22k-id2label.json'''
    elif task_name.startswith('''ade20k_''' ):
        config.num_labels = 1_5_1
        config.image_size = 5_1_2
        filename = '''ade20k-id2label.json'''
        is_segmentation_model = True
    elif task_name.startswith('''voc_''' ):
        config.num_labels = 2_1
        config.image_size = 5_1_2
        filename = '''pascal-voc-id2label.json'''
        is_segmentation_model = True
    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file )
    assert getattr(orig_config, '''model.classification.name''', -1 ) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config, '''model.classification.mitv2.width_multiplier''', 1.0 )
    assert (
        getattr(orig_config, '''model.classification.mitv2.attn_norm_layer''', -1 ) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config, '''model.classification.activation.name''', '''swish''' )
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
    if is_segmentation_model:
        config.output_stride = getattr(orig_config, '''model.segmentation.output_stride''', 1_6 )
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config, '''model.segmentation.deeplabv3.aspp_rates''', [1_2, 2_4, 3_6] )
            config.aspp_out_channels = getattr(orig_config, '''model.segmentation.deeplabv3.aspp_out_channels''', 5_1_2 )
            config.aspp_dropout_prob = getattr(orig_config, '''model.segmentation.deeplabv3.aspp_dropout''', 0.1 )
    # id2label
    repo_id = '''huggingface/label-files'''
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type='''dataset''' ), '''r''' ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}
    return config
def rename_key( dct, old, new ):
    """simple docstring"""
    val = dct.pop(old )
    dct[new] = val
def create_rename_keys( state_dict, base_model=False ):
    """simple docstring"""
    if base_model:
        model_prefix = ''''''
    else:
        model_prefix = '''mobilevitv2.'''
    rename_keys = []
    for k in state_dict.keys():
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k
        if ".block." in k:
            k_new = k_new.replace('''.block.''', '''.''' )
        if ".conv." in k:
            k_new = k_new.replace('''.conv.''', '''.convolution.''' )
        if ".norm." in k:
            k_new = k_new.replace('''.norm.''', '''.normalization.''' )
        if "conv_1." in k:
            k_new = k_new.replace('''conv_1.''', f'''{model_prefix}conv_stem.''' )
        for i in [1, 2]:
            if f'''layer_{i}.''' in k:
                k_new = k_new.replace(f'''layer_{i}.''', f'''{model_prefix}encoder.layer.{i-1}.layer.''' )
        if ".exp_1x1." in k:
            k_new = k_new.replace('''.exp_1x1.''', '''.expand_1x1.''' )
        if ".red_1x1." in k:
            k_new = k_new.replace('''.red_1x1.''', '''.reduce_1x1.''' )
        for i in [3, 4, 5]:
            if f'''layer_{i}.0.''' in k:
                k_new = k_new.replace(f'''layer_{i}.0.''', f'''{model_prefix}encoder.layer.{i-1}.downsampling_layer.''' )
            if f'''layer_{i}.1.local_rep.0.''' in k:
                k_new = k_new.replace(f'''layer_{i}.1.local_rep.0.''', f'''{model_prefix}encoder.layer.{i-1}.conv_kxk.''' )
            if f'''layer_{i}.1.local_rep.1.''' in k:
                k_new = k_new.replace(f'''layer_{i}.1.local_rep.1.''', f'''{model_prefix}encoder.layer.{i-1}.conv_1x1.''' )
        for i in [3, 4, 5]:
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]
            for j in j_in:
                if f'''layer_{i}.1.global_rep.{j}.''' in k:
                    k_new = k_new.replace(
                        f'''layer_{i}.1.global_rep.{j}.''', f'''{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.''' )
                if f'''layer_{i}.1.global_rep.{j+1}.''' in k:
                    k_new = k_new.replace(
                        f'''layer_{i}.1.global_rep.{j+1}.''', f'''{model_prefix}encoder.layer.{i-1}.layernorm.''' )
            if f'''layer_{i}.1.conv_proj.''' in k:
                k_new = k_new.replace(f'''layer_{i}.1.conv_proj.''', f'''{model_prefix}encoder.layer.{i-1}.conv_projection.''' )
        if "pre_norm_attn.0." in k:
            k_new = k_new.replace('''pre_norm_attn.0.''', '''layernorm_before.''' )
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace('''pre_norm_attn.1.''', '''attention.''' )
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace('''pre_norm_ffn.0.''', '''layernorm_after.''' )
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace('''pre_norm_ffn.1.''', '''ffn.conv1.''' )
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace('''pre_norm_ffn.3.''', '''ffn.conv2.''' )
        if "classifier.1." in k:
            k_new = k_new.replace('''classifier.1.''', '''classifier.''' )
        if "seg_head." in k:
            k_new = k_new.replace('''seg_head.''', '''segmentation_head.''' )
        if ".aspp_layer." in k:
            k_new = k_new.replace('''.aspp_layer.''', '''.''' )
        if ".aspp_pool." in k:
            k_new = k_new.replace('''.aspp_pool.''', '''.''' )
        rename_keys.append((k, k_new) )
    return rename_keys
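# Added worked example (not in the original file): the rules above map, e.g.,
#   "layer_3.1.local_rep.0.conv.weight"
#   -> "mobilevitv2.encoder.layer.2.conv_kxk.convolution.weight"
# (".conv." -> ".convolution." fires first, then the layer_3 local_rep prefix is rewritten).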
def remove_unused_keys( state_dict ):
    """simple docstring"""
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith('''seg_head.aux_head.''' ):
            keys_to_ignore.append(k )
    for k in keys_to_ignore:
        state_dict.pop(k, None )
def prepare_img( ):
    """simple docstring"""
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url, stream=True ).raw )
    return im
@torch.no_grad()
def convert_mobilevitva_checkpoint( task_name, checkpoint_path, orig_config_path, pytorch_dump_folder_path ):
    """simple docstring"""
    config = get_mobilevitva_config(task_name, orig_config_path )
    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location='''cpu''' )
    # load huggingface model
    if task_name.startswith('''ade20k_''' ) or task_name.startswith('''voc_''' ):
        model = MobileViTVaForSemanticSegmentation(config ).eval()
        base_model = False
    else:
        model = MobileViTVaForImageClassification(config ).eval()
        base_model = False
    # remove and rename some keys of load the original model
    state_dict = checkpoint
    remove_unused_keys(state_dict )
    rename_keys = create_rename_keys(state_dict, base_model=base_model )
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest )
    # load modified state_dict
    model.load_state_dict(state_dict )
    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 3_2 )
    encoding = image_processor(images=prepare_img(), return_tensors='''pt''' )
    outputs = model(**encoding )
    # verify classification model
    if task_name.startswith('''imagenet''' ):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1 ).item()
        print('''Predicted class:''', model.config.idalabel[predicted_class_idx] )
        if task_name.startswith('''imagenet1k_256''' ) and config.width_multiplier == 1.0:
            # expected_logits for base variant
            expected_logits = torch.tensor([-1.6_3_3_6E0_0, -7.3_2_0_4E-0_2, -5.1_8_8_3E-0_1] )
            assert torch.allclose(logits[0, :3], expected_logits, atol=1E-4 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f'''Saving model {task_name} to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--task""",
default="""imagenet1k_256""",
type=str,
help=(
"""Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . """
"""
Classification (ImageNet-1k)
- MobileViTV2 (256x256) : imagenet1k_256
- MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384
- MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :
imagenet21k_to_1k_256
- MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on
ImageNet-1k 384x384) : imagenet21k_to_1k_384
Segmentation
- ADE20K Dataset : ade20k_deeplabv3
- Pascal VOC 2012 Dataset: voc_deeplabv3
"""
),
choices=[
"""imagenet1k_256""",
"""imagenet1k_384""",
"""imagenet21k_to_1k_256""",
"""imagenet21k_to_1k_384""",
"""ade20k_deeplabv3""",
"""voc_deeplabv3""",
],
)
parser.add_argument(
"""--orig_checkpoint_path""", required=True, type=str, help="""Path to the original state dict (.pt file)."""
)
parser.add_argument("""--orig_config_path""", required=True, type=str, help="""Path to the original config file.""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
)
    args = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
| 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {
'''naver-clova-ix/donut-base''': '''https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json''',
# See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class DonutSwinConfig( PretrainedConfig ):
    model_type = "donut-swin"
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__( self , image_size=2_2_4 , patch_size=4 , num_channels=3 , embed_dim=9_6 , depths=[2, 2, 6, 2] , num_heads=[3, 6, 1_2, 2_4] , window_size=7 , mlp_ratio=4.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , initializer_range=0.02 , layer_norm_eps=1e-5 , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
| 677 | 0 |
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007
def euclidean_distance(vector_1: Vector , vector_2: Vector) -> VectorOut:
    """simple docstring"""
    return np.sqrt(np.sum((np.asarray(vector_1 ) - np.asarray(vector_2 )) ** 2 ) )
def euclidean_distance_no_np(vector_1: Vector , vector_2: Vector) -> VectorOut:
    """simple docstring"""
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1 , vector_2 ) ) ** (1 / 2)
if __name__ == "__main__":
    def benchmark( ):
"""simple docstring"""
from timeit import timeit
print("Without Numpy" )
print(
timeit(
"euclidean_distance_no_np([1, 2, 3], [4, 5, 6])" , number=1_00_00 , globals=globals() , ) )
print("With Numpy" )
print(
timeit(
"euclidean_distance([1, 2, 3], [4, 5, 6])" , number=1_00_00 , globals=globals() , ) )
benchmark()
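    # Added worked check (a sketch, not part of the original file): for [1, 2, 3] vs
    # [4, 5, 6] the distance is sqrt(3**2 + 3**2 + 3**2) = sqrt(27) ≈ 5.196, and both
    # implementations must agree on it.
    assert abs(euclidean_distance([1, 2, 3], [4, 5, 6]) - 27 ** 0.5) < 1e-12
    assert abs(euclidean_distance_no_np([1, 2, 3], [4, 5, 6]) - 27 ** 0.5) < 1e-12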
| 424 |
"""simple docstring"""
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse('''1.11''')
def onnx_export (model , model_args :tuple , output_path :Path , ordered_input_names , output_names , dynamic_axes , opset , use_external_data_format=False , ) -> None:
    output_path.parent.mkdir(parents=True , exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model , model_args , f=output_path.as_posix() , input_names=ordered_input_names , output_names=output_names , dynamic_axes=dynamic_axes , do_constant_folding=True , use_external_data_format=use_external_data_format , enable_onnx_checker=True , opset_version=opset , )
    else:
        export(
            model , model_args , f=output_path.as_posix() , input_names=ordered_input_names , output_names=output_names , dynamic_axes=dynamic_axes , do_constant_folding=True , opset_version=opset , )
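# Added note (not in the original script): the `dynamic_axes` mapping passed through here
# marks the named dimensions (batch, sequence, height, width) as symbolic, so the exported
# ONNX graphs accept variable batch sizes and image resolutions at inference time.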
@torch.no_grad()
def convert_models(model_path :str , output_path :str , opset :int , fp16 :bool = False) -> None:
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = '''cuda'''
    elif fp16 and not torch.cuda.is_available():
        raise ValueError('''`float16` model export is only supported on GPUs with CUDA''')
    else:
        device = '''cpu'''
    pipeline = StableDiffusionPipeline.from_pretrained(model_path , torch_dtype=dtype).to(device)
    output_path = Path(output_path)
    # TEXT ENCODER
    num_tokens = pipeline.text_encoder.config.max_position_embeddings
    text_hidden_size = pipeline.text_encoder.config.hidden_size
    text_input = pipeline.tokenizer(
        '''A sample prompt''' , padding='''max_length''' , max_length=pipeline.tokenizer.model_max_length , truncation=True , return_tensors='''pt''' , )
    onnx_export(
        pipeline.text_encoder , model_args=(text_input.input_ids.to(device=device , dtype=torch.int32)) , output_path=output_path / '''text_encoder''' / '''model.onnx''' , ordered_input_names=['''input_ids'''] , output_names=['''last_hidden_state''', '''pooler_output'''] , dynamic_axes={
        '''input_ids''': {0: '''batch''', 1: '''sequence'''},
    } , opset=opset , )
    del pipeline.text_encoder
    # UNET
    unet_in_channels = pipeline.unet.config.in_channels
    unet_sample_size = pipeline.unet.config.sample_size
    unet_path = output_path / '''unet''' / '''model.onnx'''
    onnx_export(
        pipeline.unet , model_args=(
            torch.randn(2 , unet_in_channels , unet_sample_size , unet_sample_size).to(device=device , dtype=dtype),
            torch.randn(2).to(device=device , dtype=dtype),
            torch.randn(2 , num_tokens , text_hidden_size).to(device=device , dtype=dtype),
            False,
        ) , output_path=unet_path , ordered_input_names=['''sample''', '''timestep''', '''encoder_hidden_states''', '''return_dict'''] , output_names=['''out_sample'''] , dynamic_axes={
        '''sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
        '''timestep''': {0: '''batch'''},
        '''encoder_hidden_states''': {0: '''batch''', 1: '''sequence'''},
    } , opset=opset , use_external_data_format=True , )
    unet_model_path = str(unet_path.absolute().as_posix())
    unet_dir = os.path.dirname(unet_model_path)
    unet = onnx.load(unet_model_path)
    # clean up existing tensor files
    shutil.rmtree(unet_dir)
    os.mkdir(unet_dir)
    # collate external tensor files into one
    onnx.save_model(
        unet , unet_model_path , save_as_external_data=True , all_tensors_to_one_file=True , location='''weights.pb''' , convert_attribute=False , )
    del pipeline.unet
    # VAE ENCODER
    vae_encoder = pipeline.vae
    vae_in_channels = vae_encoder.config.in_channels
    vae_sample_size = vae_encoder.config.sample_size
    # need to get the raw tensor output (sample) from the encoder
    vae_encoder.forward = lambda sample , return_dict: vae_encoder.encode(sample , return_dict)[0].sample()
    onnx_export(
        vae_encoder , model_args=(
            torch.randn(1 , vae_in_channels , vae_sample_size , vae_sample_size).to(device=device , dtype=dtype),
            False,
        ) , output_path=output_path / '''vae_encoder''' / '''model.onnx''' , ordered_input_names=['''sample''', '''return_dict'''] , output_names=['''latent_sample'''] , dynamic_axes={
        '''sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
    } , opset=opset , )
    # VAE DECODER
    vae_decoder = pipeline.vae
    vae_latent_channels = vae_decoder.config.latent_channels
    vae_out_channels = vae_decoder.config.out_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_encoder.decode
    onnx_export(
        vae_decoder , model_args=(
            torch.randn(1 , vae_latent_channels , unet_sample_size , unet_sample_size).to(device=device , dtype=dtype),
            False,
        ) , output_path=output_path / '''vae_decoder''' / '''model.onnx''' , ordered_input_names=['''latent_sample''', '''return_dict'''] , output_names=['''sample'''] , dynamic_axes={
        '''latent_sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
    } , opset=opset , )
    del pipeline.vae
    # SAFETY CHECKER
    if pipeline.safety_checker is not None:
        safety_checker = pipeline.safety_checker
        clip_num_channels = safety_checker.config.vision_config.num_channels
        clip_image_size = safety_checker.config.vision_config.image_size
        safety_checker.forward = safety_checker.forward_onnx
        onnx_export(
            pipeline.safety_checker , model_args=(
                torch.randn(
                    1 , clip_num_channels , clip_image_size , clip_image_size , ).to(device=device , dtype=dtype),
                torch.randn(1 , vae_sample_size , vae_sample_size , vae_out_channels).to(device=device , dtype=dtype),
            ) , output_path=output_path / '''safety_checker''' / '''model.onnx''' , ordered_input_names=['''clip_input''', '''images'''] , output_names=['''out_images''', '''has_nsfw_concepts'''] , dynamic_axes={
            '''clip_input''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
            '''images''': {0: '''batch''', 1: '''height''', 2: '''width''', 3: '''channels'''},
        } , opset=opset , )
        del pipeline.safety_checker
        safety_checker = OnnxRuntimeModel.from_pretrained(output_path / '''safety_checker''')
        feature_extractor = pipeline.feature_extractor
    else:
        safety_checker = None
        feature_extractor = None
    onnx_pipeline = OnnxStableDiffusionPipeline(
        vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / '''vae_encoder''') , vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / '''vae_decoder''') , text_encoder=OnnxRuntimeModel.from_pretrained(output_path / '''text_encoder''') , tokenizer=pipeline.tokenizer , unet=OnnxRuntimeModel.from_pretrained(output_path / '''unet''') , scheduler=pipeline.scheduler , safety_checker=safety_checker , feature_extractor=feature_extractor , requires_safety_checker=safety_checker is not None , )
    onnx_pipeline.save_pretrained(output_path)
    print('''ONNX pipeline saved to''' , output_path)
    del pipeline
    del onnx_pipeline
    _ = OnnxStableDiffusionPipeline.from_pretrained(output_path , provider='''CPUExecutionProvider''')
    print('''ONNX pipeline is loadable''')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_path''',
type=str,
required=True,
help='''Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).''',
)
parser.add_argument('''--output_path''', type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--opset''',
default=14,
type=int,
help='''The version of the ONNX operator set to use.''',
)
parser.add_argument('''--fp16''', action='''store_true''', default=False, help='''Export the models in `float16` mode''')
    args = parser.parse_args()
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
| 677 | 0 |
'''simple docstring'''
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
nan = float("nan")
class Tee:
    def __init__( self , filename ):
        '''simple docstring'''
        self.stdout = sys.stdout
        self.file = open(filename , "a" )
    def __getattr__( self , attr ):
        '''simple docstring'''
        return getattr(self.stdout , attr )
    def write( self , msg ):
        '''simple docstring'''
        self.stdout.write(msg )
        # strip tqdm codes
        self.file.write(re.sub(r"^.*\r" , "" , msg , 0 , re.M ) )
def get_original_command( max_width=8_0 , full_python_path=False ):
    '''simple docstring'''
    cmd = []
    # deal with critical env vars
    env_keys = ['''CUDA_VISIBLE_DEVICES''']
    for key in env_keys:
        val = os.environ.get(key , None )
        if val is not None:
            cmd.append(f'{key}={val}' )
    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("/" )[-1]
    cmd.append(python )
    # now the normal args
    cmd += list(map(shlex.quote , sys.argv ) )
    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ""
    while len(cmd ) > 0:
        current_line += f'{cmd.pop(0 )} '
        if len(cmd ) == 0 or len(current_line ) + len(cmd[0] ) + 1 > max_width - 1:
            lines.append(current_line )
            current_line = ""
    return "\\\n".join(lines )
def get_base_command( args , output_dir ):
    '''simple docstring'''
    # unwrap multi-line input
    args.base_cmd = re.sub(r"[\\\n]+" , " " , args.base_cmd )
    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r"--output_dir\s+[^\s]+" , "" , args.base_cmd )
    args.base_cmd += f' --output_dir {output_dir}'
    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r"--overwrite_output_dir\s+" , "" , args.base_cmd )
    args.base_cmd += " --overwrite_output_dir"
    return [sys.executable] + shlex.split(args.base_cmd )
def process_run_single( id , cmd , variation , output_dir , target_metric_key , metric_keys , verbose ):
    '''simple docstring'''
    # enable to debug everything but the run itself, to do it fast and see the progress
    if 0:
        import random
        from time import sleep
        sleep(0 )
        return dict(
            {k: random.uniform(0 , 1_0_0 ) for k in metric_keys} , **{target_metric_key: random.choice([nan, 10.31, 1_0_0.2, 55.6666, 2_2_2.2_2_2_2_2_2_2_2] )} , )
    result = subprocess.run(cmd , capture_output=True , text=True )
    if verbose:
        print("STDOUT" , result.stdout )
        print("STDERR" , result.stderr )
    # save the streams
    prefix = variation.replace(" " , "-" )
    with open(Path(output_dir ) / f'log.{prefix}.stdout.txt' , "w" ) as f:
        f.write(result.stdout )
    with open(Path(output_dir ) / f'log.{prefix}.stderr.txt' , "w" ) as f:
        f.write(result.stderr )
    if result.returncode != 0:
        if verbose:
            print("failed" )
        return {target_metric_key: nan}
    with io.open(f'{output_dir}/all_results.json' , "r" , encoding="utf-8" ) as f:
        metrics = json.load(f )
    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}
def process_run( id , cmd , variation_key , variation , longest_variation_len , target_metric_key , report_metric_keys , repeat_times , output_dir , verbose , ):
    '''simple docstring'''
    results = []
    metrics = []
    preamble = f'{id}: {variation:<{longest_variation_len}}'
    outcome = f'{preamble}: '
    metric_keys = set(report_metric_keys + [target_metric_key] )
    for i in tqdm(range(repeat_times ) , desc=preamble , leave=False ):
        single_run_metrics = process_run_single(
            id , cmd , variation , output_dir , target_metric_key , metric_keys , verbose )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result ):
            metrics.append(single_run_metrics )
            results.append(result )
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = f'\33[2K\r{outcome}'
    if len(metrics ) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key] , 2 )
        results_str = f'{outcome} {mean_target}'
        if len(results ) > 1:
            results_str += f' {tuple(round(x , 2 ) for x in results )}'
        print(results_str )
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome )
        return {variation_key: variation, target_metric_key: nan}
def get_versions( ):
    '''simple docstring'''
    properties = torch.cuda.get_device_properties(torch.device("cuda" ) )
return f'\nDatetime : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S" )}\n\nSoftware:\ntransformers: {transformers.__version__}\ntorch : {torch.__version__}\ncuda : {torch.version.cuda}\npython : {platform.python_version()}\n\nHardware:\n{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**3_0:0.2f}GB\n'
def process_results( results , target_metric_key , report_metric_keys , base_variation , output_dir ):
    '''simple docstring'''
    df = pd.DataFrame(results )
    variation_key = '''variation'''
    diff_key = '''diff_%'''
    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation] ):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value ):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()
    # create diff column if possible
    if not math.isnan(sentinel_value ):
        df[diff_key] = df.apply(
            lambda r : round(1_0_0 * (r[target_metric_key] - sentinel_value) / sentinel_value )
            if not math.isnan(r[target_metric_key] )
            else 0 , axis="columns" , )
    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols , axis="columns" )  # reorder cols
    # capitalize
    df = df.rename(str.capitalize , axis="columns" )
    # make the cols as narrow as possible
    df_github = df.rename(lambda c : c.replace("_" , "<br>" ) , axis="columns" )
    df_console = df.rename(lambda c : c.replace("_" , "\n" ) , axis="columns" )
    report = ['''''', '''Copy between the cut-here-lines and paste as is to github or a forum''']
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False , floatfmt=".2f" )]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False , floatfmt=".2f" )]
    print("\n\n".join(report ) )
def main( ):
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base-cmd" , default=None , type=str , required=True , help="Base cmd" , )
    parser.add_argument(
        "--variations" , default=None , type=str , nargs="+" , required=True , help="Multi-dimensional variations, example: \'|--fp16|--bf16\' \'|--tf32\'" , )
    parser.add_argument(
        "--base-variation" , default=None , type=str , help="Baseline variation to compare to. if None the minimal target value will be used to compare against" , )
    parser.add_argument(
        "--target-metric-key" , default=None , type=str , required=True , help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second" , )
    parser.add_argument(
        "--report-metric-keys" , default="" , type=str , help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., \'train_loss train_samples" , )
    parser.add_argument(
        "--repeat-times" , default=1 , type=int , help="How many times to re-run each variation - an average will be reported" , )
    parser.add_argument(
        "--output_dir" , default="output_benchmark" , type=str , help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked" , )
    parser.add_argument(
        "--verbose" , default=False , action="store_true" , help="Whether to show the outputs of each run or just the benchmark progress" , )
    args = parser.parse_args()
    output_dir = args.output_dir
    Path(output_dir ).mkdir(exist_ok=True )
    base_cmd = get_base_command(args , output_dir )
    # split each dimension into its --foo variations
    dims = [list(map(str.strip , re.split(r"\|" , x ) ) ) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip , map(" ".join , itertools.product(*dims ) ) ) )
    longest_variation_len = max(len(x ) for x in variations )
    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()
    # capture prints into a log file for convenience
    report_fn = f'benchmark-report-{datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S" )}.txt'
    print(f'\nNote: each run\'s output is also logged under {output_dir}/log.*.std*.txt' )
    print(f'and this script\'s output is also piped into {report_fn}' )
    sys.stdout = Tee(report_fn )
    print(f'\n*** Running {len(variations )} benchmarks:' )
    print(f'Base command: {" ".join(base_cmd )}' )
    variation_key = '''variation'''
    results = []
    for id, variation in enumerate(tqdm(variations , desc="Total completion: " , leave=False ) ):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1 , cmd , variation_key , variation , longest_variation_len , args.target_metric_key , report_metric_keys , args.repeat_times , output_dir , args.verbose , ) )
    process_results(results , args.target_metric_key , report_metric_keys , args.base_variation , output_dir )
if __name__ == "__main__":
main()
| 400 |
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def encode_line(tokenizer , line :str , max_length :int , padding_side :str , pad_to_max_length=True , return_tensors="pt") -> Dict:
    extra_kw = {'''add_prefix_space''': True} if isinstance(tokenizer , BartTokenizer) and not line.startswith(''' ''') else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line] , max_length=max_length , padding='''max_length''' if pad_to_max_length else None , truncation=True , return_tensors=return_tensors , add_special_tokens=True , **extra_kw , )
def trim_batch(input_ids , pad_token_id , attention_mask=None , ):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class Seq2SeqDataset( Dataset ):
    def __init__( self , tokenizer , data_dir , max_source_length , max_target_length , type_path="train" , n_obs=None , src_lang=None , tgt_lang=None , prefix="" , ):
        '''simple docstring'''
        super().__init__()
        self.src_file = Path(data_dir ).joinpath(type_path + '''.source''' )
        self.tgt_file = Path(data_dir ).joinpath(type_path + '''.target''' )
        self.src_lens = self.get_char_lens(self.src_file )
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens ) > 0, f"""found empty line in {self.src_file}"""
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
def __len__( self : Any ):
'''simple docstring'''
return len(self.src_lens )
    def __getitem__( self , index ):
        '''simple docstring'''
        index = index + 1 # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file ) , index ).rstrip('''\n''' )
        tgt_line = linecache.getline(str(self.tgt_file ) , index ).rstrip('''\n''' )
        assert source_line, f"""empty source line for index {index}"""
        assert tgt_line, f"""empty tgt line for index {index}"""
        # Need to add eos token manually for T5
        if isinstance(self.tokenizer , TaTokenizer ):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token
        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer , RagTokenizer ) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer , RagTokenizer ) else self.tokenizer
        source_inputs = encode_line(source_tokenizer , source_line , self.max_source_length , '''right''' )
        target_inputs = encode_line(target_tokenizer , tgt_line , self.max_target_length , '''right''' )
        source_ids = source_inputs['''input_ids'''].squeeze()
        target_ids = target_inputs['''input_ids'''].squeeze()
        src_mask = source_inputs['''attention_mask'''].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }
    @staticmethod
    def get_char_lens( data_file ):
        '''simple docstring'''
        return [len(x ) for x in Path(data_file ).open().readlines()]
    def collate_fn( self , batch ):
        '''simple docstring'''
        input_ids = torch.stack([x['''input_ids'''] for x in batch] )
        masks = torch.stack([x['''attention_mask'''] for x in batch] )
        target_ids = torch.stack([x['''decoder_input_ids'''] for x in batch] )
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer , RagTokenizer )
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer , RagTokenizer )
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids , tgt_pad_token_id )
        source_ids , source_mask = trim_batch(input_ids , src_pad_token_id , attention_mask=masks )
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)
def flatten_list(summary_ids :List[List]) -> List:
    return list(itertools.chain.from_iterable(summary_ids))
def save_git_info(folder_path :str) -> None:
    repo_infos = get_git_info()
    save_json(repo_infos , os.path.join(folder_path , '''git_log.json'''))
def save_json(content , path , indent=4 , **json_dump_kwargs) -> None:
    with open(path , '''w''') as f:
        json.dump(content , f , indent=indent , **json_dump_kwargs)
def load_json(path):
    with open(path) as f:
        return json.load(f)
def get_git_info() -> Dict:
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        '''repo_id''': str(repo),
        '''repo_sha''': str(repo.head.object.hexsha),
        '''repo_branch''': str(repo.active_branch),
        '''hostname''': str(socket.gethostname()),
    }
    return repo_infos
def lmap(f :Callable , x :Iterable) -> List:
    return list(map(f , x))
def pickle_save(obj , path):
    with open(path , '''wb''') as f:
        return pickle.dump(obj , f)
def normalize_answer(s :str) -> str:
    def remove_articles(text :str):
        return re.sub(R'''\b(a|an|the)\b''' , ''' ''' , text)
    def white_space_fix(text :str):
        return " ".join(text.split())
    def remove_punc(text :str):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)
    def lower(text :str):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(s))))
def f1_score(prediction :str , ground_truth :str) -> float:
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    fa = (2 * precision * recall) / (precision + recall)
    return fa
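# Worked example (added note, not part of the original file): for prediction "the cat sat on"
# vs gold "a cat sat", normalize_answer() drops the articles, the token bags share
# {"cat", "sat"}, so precision = 2/3, recall = 2/2 = 1, and F1 = 2 * (2/3 * 1) / (2/3 + 1) = 0.8.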
def exact_match_score(prediction , ground_truth) -> bool:
    return normalize_answer(prediction) == normalize_answer(ground_truth)
def calculate_exact_match(output_lns :List[str] , reference_lns :List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns , reference_lns):
        em += exact_match_score(hypo , pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}
def is_rag_model(model_prefix) -> bool:
    return model_prefix.startswith('''rag''')
def set_extra_model_params(extra_params , hparams , config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param['''dropout'''] = '''dropout_rate'''
    for p in extra_params:
        if getattr(hparams , p , None):
            if not hasattr(config , p) and not hasattr(config , equivalent_param[p]):
                logger.info('''config doesn\'t have a `{}` attribute'''.format(p))
                delattr(hparams , p)
                continue
            set_p = p if hasattr(config , p) else equivalent_param[p]
            setattr(config , set_p , getattr(hparams , p))
            delattr(hparams , p)
    return hparams, config
| 677 | 0 |
'''simple docstring'''
def move_tower( height , from_pole , to_pole , with_pole ):
    if height >= 1:
        move_tower(height - 1 , from_pole , with_pole , to_pole )
        move_disk(from_pole , to_pole )
        move_tower(height - 1 , with_pole , to_pole , from_pole )
def move_disk( from_pole , to_pole ):
    print("""moving disk from""" , from_pole , """to""" , to_pole )
def main( ):
    height = int(input("""Height of hanoi: """ ).strip() )
    move_tower(height , """A""" , """B""" , """C""" )
if __name__ == "__main__":
main()
| 263 |
"""simple docstring"""
def move_tower (height :int , from_pole :str , to_pole :str , with_pole :str) -> None:
    if height >= 1:
        move_tower(height - 1 , from_pole , with_pole , to_pole)
        move_disk(from_pole , to_pole)
        move_tower(height - 1 , with_pole , to_pole , from_pole)
def move_disk (from_pole :str , to_pole :str) -> None:
    print('''moving disk from''' , from_pole , '''to''' , to_pole)
def main () -> None:
    height = int(input('''Height of hanoi: ''').strip())
    move_tower(height , '''A''' , '''B''' , '''C''')
if __name__ == "__main__":
main()
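# Added note (not part of the original file): each call spawns two recursive calls of
# height - 1 plus one move_disk(), so solving a tower of height n takes 2**n - 1 moves;
# e.g. main() with height 3 prints exactly 7 "moving disk" lines.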
| 677 | 0 |
"""simple docstring"""
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass( KwargsHandler ):
    a : int = 0
    b : bool = False
    c : float = 3.0
class KwargsHandlerTester( unittest.TestCase ):
    def test_kwargs_handler( self ):
        '''simple docstring'''
        self.assertDictEqual(MockClass().to_kwargs() , {})
        self.assertDictEqual(MockClass(a=2).to_kwargs() , {'''a''': 2})
        self.assertDictEqual(MockClass(a=2 , b=True).to_kwargs() , {'''a''': 2, '''b''': True})
        self.assertDictEqual(MockClass(a=2 , c=2.25).to_kwargs() , {'''a''': 2, '''c''': 2.25})
@require_cuda
    def test_grad_scaler_kwargs_handler( self ):
        '''simple docstring'''
        scaler_handler = GradScalerKwargs(init_scale=10_24 , growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision='''fp16''' , kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler
        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale , 10_24.0)
        self.assertEqual(scaler._growth_factor , 2.0)
        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor , 0.5)
        self.assertEqual(scaler._growth_interval , 20_00)
        self.assertEqual(scaler._enabled , True)
    @require_multi_gpu
    def test_ddp_kwargs( self ):
        '''simple docstring'''
        cmd = ['''torchrun''', F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd , env=os.environ.copy())
if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)
    # Check the values changed in kwargs
    error_msg = """"""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
if observed_bucket_cap_map != 15:
error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 19 |
"""simple docstring"""
from sklearn.metrics import mean_squared_error
import datasets
UpperCAmelCase = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
UpperCAmelCase = '''\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
'''
UpperCAmelCase = '''
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
"raw_values" : Returns a full set of errors in case of multioutput input.
"uniform_average" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric("mse")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{\'mse\': 0.6123724356957945}
If you\'re using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric("mse", "multilist")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{\'mse\': array([0.41666667, 1. ])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __magic_name__ ( datasets.Metric ):
    def _info( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
'''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html'''
] , )
    def _get_feature_types( self ):
'''simple docstring'''
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value('''float''' ) ),
"references": datasets.Sequence(datasets.Value('''float''' ) ),
}
else:
return {
"predictions": datasets.Value('''float''' ),
"references": datasets.Value('''float''' ),
}
    def _compute( self , predictions , references , sample_weight=None , multioutput="uniform_average" , squared=True ):
        '''simple docstring'''
        mse = mean_squared_error(
            predictions , references , sample_weight=sample_weight , multioutput=multioutput , squared=squared )
        return {"mse": mse}
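# Numeric check of the docstring example above (added note, not part of the original file):
# with predictions [2.5, 0.0, 2, 8] and references [3, -0.5, 2, 7] the squared errors are
# 0.25, 0.25, 0.0, 1.0, whose mean is 0.375 -- matching the documented {'mse': 0.375}.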
| 677 | 0 |
"""simple docstring"""
from statistics import mean, stdev
def normalization( data , ndigits = 3 ) -> list:
    x_min = min(data )
    x_max = max(data )
    # normalize data
    return [round((x - x_min) / (x_max - x_min) , ndigits ) for x in data]
def standardization( data , ndigits = 3 ) -> list:
    mu = mean(data )
    sigma = stdev(data )
    # standardize data
    return [round((x - mu) / (sigma) , ndigits ) for x in data]
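# Added usage sketch (not part of the original file): min-max scaling maps a sample
# onto [0, 1]; z-scoring centers it at 0 with sample standard deviation 1.
if __name__ == "__main__":
    print(normalization([2, 4, 6]))    # [0.0, 0.5, 1.0]
    print(standardization([2, 4, 6]))  # [-1.0, 0.0, 1.0]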
| 586 |
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand( ABC ):
    @staticmethod
    @abstractmethod
    def register_subcommand( parser : ArgumentParser ):
        '''simple docstring'''
        raise NotImplementedError()
    @abstractmethod
    def run( self ):
        '''simple docstring'''
        raise NotImplementedError()
| 677 | 0 |
import unittest
from knapsack import knapsack as k
class UpperCAmelCase ( unittest.TestCase ):
    def test_base_case(self ) -> None:
        '''simple docstring'''
        cap = 0
        val = [0]
        w = [0]
        c = len(val )
        self.assertEqual(k.knapsack(cap , w , val , c ) , 0 )
        val = [60]
        w = [10]
        c = len(val )
        self.assertEqual(k.knapsack(cap , w , val , c ) , 0 )
    def test_easy_case(self ) -> None:
        '''simple docstring'''
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val )
        self.assertEqual(k.knapsack(cap , w , val , c ) , 5 )
    def test_knapsack(self ) -> None:
        '''simple docstring'''
        cap = 50
        val = [60, 1_00, 1_20]
        w = [10, 20, 30]
        c = len(val )
        self.assertEqual(k.knapsack(cap , w , val , c ) , 2_20 )
if __name__ == "__main__":
unittest.main()
| 204 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'''configuration_encodec''': [
'''ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EncodecConfig''',
],
'''feature_extraction_encodec''': ['''EncodecFeatureExtractor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_encodec'''] = [
'''ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EncodecModel''',
'''EncodecPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 677 | 0 |
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch( tf_checkpoint_path , rembert_config_file , pytorch_dump_path ) -> None:
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file )
    print("""Building PyTorch model from configuration: {}""".format(str(config ) ) )
    model = RemBertModel(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print("""Save PyTorch model to {}""".format(pytorch_dump_path ) )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--rembert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained RemBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
    convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 45 |
"""simple docstring"""
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester( unittest.TestCase ):
    def __init__( self , parent , batch_size=3 , image_size=3_2 , num_channels=3 , embeddings_size=1_0 , hidden_sizes=[1_0, 2_0, 3_0, 4_0] , depths=[1, 1, 2, 1] , is_training=True , use_labels=True , hidden_act="relu" , num_labels=3 , scope=None , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes )
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        config = self.get_config()
        return config, pixel_values
    def get_config( self ):
'''simple docstring'''
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
    def create_and_check_model( self , config , pixel_values ):
        '''simple docstring'''
        model = FlaxRegNetModel(config=config )
        result = model(pixel_values )
        # Output shape (b, c, h, w)
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
    def create_and_check_for_image_classification( self , config , pixel_values ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_flax
class FlaxRegNetModelTest( FlaxModelTesterMixin , unittest.TestCase ):
    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = FlaxRegNetModelTester(self )
        self.config_tester = ConfigTester(self , config_class=RegNetConfig , has_text_modality=False )
    def test_config( self ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self ):
'''simple docstring'''
return
    def test_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_image_classification( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@unittest.skip(reason='''RegNet does not use inputs_embeds''' )
    def test_inputs_embeds( self ):
'''simple docstring'''
pass
@unittest.skip(reason='''RegNet does not support input and output embeddings''' )
    def test_model_common_attributes( self ):
'''simple docstring'''
pass
    def test_forward_signature( self ):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.__call__ )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_hidden_states_output( self ):
        '''simple docstring'''
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
    def test_jit_compilation( self ):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
                model = model_class(config )
                @jax.jit
                def model_jitted(pixel_values , **kwargs ):
                    return model(pixel_values=pixel_values , **kwargs )
                with self.subTest('''JIT Enabled''' ):
                    jitted_outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
                with self.subTest('''JIT Disabled''' ):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(outputs ) , len(jitted_outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
    return image
@require_flax
class FlaxRegNetModelIntegrationTest( unittest.TestCase ):
@cached_property
    def default_image_processor( self ):
'''simple docstring'''
return AutoImageProcessor.from_pretrained('''facebook/regnet-y-040''' ) if is_vision_available() else None
@slow
    def test_inference_image_classification_head( self ):
'''simple docstring'''
        model = FlaxRegNetForImageClassification.from_pretrained('''facebook/regnet-y-040''' )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''np''' )
        outputs = model(**inputs )
        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836] )
        self.assertTrue(jnp.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
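# Illustrative aside on the JIT test above: `jax.jit` compiles a traced version of the
# call, and the test only asserts that compiled and eager outputs agree in shape.
# A minimal standalone sketch of the same idea:
#   jitted = jax.jit(lambda x: x * 2)
#   assert (jitted(jnp.ones(3)) == 2.0).all()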
| 677 | 0 |
"""simple docstring"""
def simplify(current_set: list[list]) -> list[list]:
    """simple docstring"""
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set ):
        magnitude = row[0]
        for column_index, column in enumerate(row ):
            if magnitude == 0:
                duplicate_set[row_index][column_index] = column
                continue
            duplicate_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = duplicate_set[0]
    final_set = [first_row]
    current_set = duplicate_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row )
            continue
        for column_index in range(len(row ) ):
            temp_row.append(first_row[column_index] - row[column_index] )
        final_set.append(temp_row )
    # Create next recursion iteration set
    if len(final_set[0] ) != 3:
        current_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0] )
            next_iteration.append(row[1::] )
        resultant = simplify(next_iteration )
        for i in range(len(resultant ) ):
            resultant[i].insert(0 , current_first_column[i] )
        resultant.insert(0 , current_first_row )
        final_set = resultant
    return final_set
def solve_simultaneous(equations: list[list]) -> list:
    """simple docstring"""
    if len(equations ) == 0:
        raise IndexError('''solve_simultaneous() requires n lists of length n+1''' )
    _length = len(equations ) + 1
    if any(len(item ) != _length for item in equations ):
        raise IndexError('''solve_simultaneous() requires n lists of length n+1''' )
    for row in equations:
        if any(not isinstance(column , (int, float) ) for column in row ):
            raise ValueError('''solve_simultaneous() requires lists of integers''' )
    if len(equations ) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set ):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data ):
            if 0 not in row:
                full_row = data_set.pop(row_index )
                break
        if not full_row:
            raise ValueError('''solve_simultaneous() requires at least 1 full equation''' )
        data_set.insert(0 , full_row )
    useable_form = data_set.copy()
    simplified = simplify(useable_form )
    simplified = simplified[::-1]
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0 )
                continue
            solutions.append(current_solution / row[-2] )
            continue
        temp_row = row.copy()[: len(row ) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0 )
        if len(temp_row ) == 0:
            solutions.append(0 )
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row ):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution )
    final = []
    for item in solutions:
        final.append(float(round(item , 5 ) ) )
    return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
    eq = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]])) | 552 |
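    # Worked example (illustrative): x + y = 3 and 2x + y = 5 give x = 2, y = 1, so
    #   solve_simultaneous([[1, 1, 3], [2, 1, 5]])  # -> [2.0, 1.0]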
"""simple docstring"""
demo_graph = {
'''A''': ['''B''', '''C''', '''E'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F''', '''G'''],
'''D''': ['''B'''],
'''E''': ['''A''', '''B''', '''D'''],
'''F''': ['''C'''],
'''G''': ['''C'''],
}
def bfs_shortest_path(graph: dict, start, goal) -> list[str]:
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]
    # return path if start is goal
    if start == goal:
        return [start]
    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path
            # mark node as explored
            explored.add(node)
    # in case there's no path between the 2 nodes
    return []
def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(start)
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target] , dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, '''G''', '''D''')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, '''G''', '''D''')) # returns 4
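    # A few more illustrative calls against the demo graph above:
    #   bfs_shortest_path(demo_graph, 'A', 'F')           # -> ['A', 'C', 'F']
    #   bfs_shortest_path_distance(demo_graph, 'A', 'F')  # -> 2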
| 677 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class TFLayoutLMModelTester:
    """simple docstring"""
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , range_bbox=1000 , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        # convert bbox to numpy since TF does not support item assignment
        bbox = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        bbox = tf.convert_to_tensor(bbox )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = LayoutLMConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model( self , config , input_ids , bbox , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = TFLayoutLMModel(config=config )
        result = model(input_ids , bbox , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , bbox , token_type_ids=token_type_ids )
        result = model(input_ids , bbox )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def create_and_check_for_masked_lm( self , config , input_ids , bbox , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = TFLayoutLMForMaskedLM(config=config )
        result = model(input_ids , bbox , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_sequence_classification( self , config , input_ids , bbox , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = TFLayoutLMForSequenceClassification(config=config )
        result = model(input_ids , bbox , attention_mask=input_mask , token_type_ids=token_type_ids )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_token_classification( self , config , input_ids , bbox , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = TFLayoutLMForTokenClassification(config=config )
        result = model(input_ids , bbox , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_question_answering( self , config , input_ids , bbox , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = TFLayoutLMForQuestionAnswering(config=config )
        result = model(input_ids , bbox , attention_mask=input_mask , token_type_ids=token_type_ids )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_tf
class TFLayoutLMModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
(
TFLayoutLMModel,
TFLayoutLMForMaskedLM,
TFLayoutLMForTokenClassification,
TFLayoutLMForSequenceClassification,
TFLayoutLMForQuestionAnswering,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": TFLayoutLMModel,
"fill-mask": TFLayoutLMForMaskedLM,
"text-classification": TFLayoutLMForSequenceClassification,
"token-classification": TFLayoutLMForTokenClassification,
"zero-shot": TFLayoutLMForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = True
    onnx_min_opset = 10
    def setUp( self ):
        """simple docstring"""
        self.model_tester = TFLayoutLMModelTester(self )
        self.config_tester = ConfigTester(self , config_class=LayoutLMConfig , hidden_size=37 )
    def test_config( self ):
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_model( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_masked_lm( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_for_sequence_classification( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
    def test_for_token_classification( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
    def test_for_question_answering( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@unittest.skip("Onnx compliancy broke with TF 2.10" )
    def test_onnx_compliancy( self ):
"""simple docstring"""
pass
def prepare_layoutlm_batch_inputs():
# Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on:
# fmt: off
    input_ids = tf.convert_to_tensor([[1_01,10_19,10_14,10_16,10_37,1_28_49,47_47,10_04,1_42_46,22_78,54_39,45_24,50_02,29_30,21_93,29_30,43_41,32_08,10_05,10_55,21_71,28_48,1_13_00,35_31,1_02],[1_01,40_70,40_34,70_20,10_24,30_58,10_15,10_13,28_61,10_13,60_70,1_92_74,27_72,62_05,2_78_14,1_61_47,1_61_47,43_43,20_47,1_02_83,1_09_69,1_43_89,10_12,23_38,1_02]] )  # noqa: E231
    attention_mask = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] )  # noqa: E231
    bbox = tf.convert_to_tensor([[[0,0,0,0],[4_23,2_37,4_40,2_51],[4_27,2_72,4_41,2_87],[4_19,1_15,4_37,1_29],[9_61,8_85,9_92,9_12],[2_56,38,3_30,58],[2_56,38,3_30,58],[3_36,42,3_53,57],[3_60,39,4_01,56],[3_60,39,4_01,56],[4_11,39,4_71,59],[4_79,41,5_28,59],[5_33,39,6_30,60],[67,1_13,1_34,1_31],[1_41,1_15,2_09,1_32],[68,1_49,1_33,1_66],[1_41,1_49,1_87,1_64],[1_95,1_48,2_87,1_65],[1_95,1_48,2_87,1_65],[1_95,1_48,2_87,1_65],[2_95,1_48,3_49,1_65],[4_41,1_49,4_92,1_66],[4_97,1_49,5_46,1_64],[64,2_01,1_25,2_18],[10_00,10_00,10_00,10_00]],[[0,0,0,0],[6_62,1_50,7_54,1_66],[6_65,1_99,7_42,2_11],[5_19,2_13,5_54,2_28],[5_19,2_13,5_54,2_28],[1_34,4_33,1_87,4_54],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[3_14,4_69,3_76,4_82],[5_04,6_84,5_82,7_06],[9_41,8_25,9_73,9_00],[9_41,8_25,9_73,9_00],[9_41,8_25,9_73,9_00],[9_41,8_25,9_73,9_00],[6_10,7_49,6_52,7_65],[1_30,6_59,1_68,6_72],[1_76,6_57,2_37,6_72],[2_38,6_57,3_12,6_72],[4_43,6_53,6_28,6_72],[4_43,6_53,6_28,6_72],[7_16,3_01,8_25,3_17],[10_00,10_00,10_00,10_00]]] )  # noqa: E231
    token_type_ids = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] )  # noqa: E231
    # these are sequence labels (i.e. at the token level)
    labels = tf.convert_to_tensor([[-1_00,10,10,10,9,1,-1_00,7,7,-1_00,7,7,4,2,5,2,8,8,-1_00,-1_00,5,0,3,2,-1_00],[-1_00,12,12,12,-1_00,12,10,-1_00,-1_00,-1_00,-1_00,10,12,9,-1_00,-1_00,-1_00,10,10,10,9,12,-1_00,10,-1_00]] )  # noqa: E231
# fmt: on
return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class TFLayoutLMModelIntegrationTest( unittest.TestCase ):
"""simple docstring"""
@slow
def lowerCAmelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
        model = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased" )
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(input_ids=input_ids , bbox=bbox , attention_mask=attention_mask , token_type_ids=token_type_ids )
        # test the sequence output on [0, :3, :3]
        expected_slice = tf.convert_to_tensor(
            [[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]] , )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1e-3 ) )
        # test the pooled output on [1, :3]
        expected_slice = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552] )
        self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , expected_slice , atol=1e-3 ) )
@slow
def lowerCAmelCase_ ( self : List[str] ):
"""simple docstring"""
        model = TFLayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased" , num_labels=2 )
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(
            input_ids=input_ids , bbox=bbox , attention_mask=attention_mask , token_type_ids=token_type_ids , labels=tf.convert_to_tensor([1, 1] ) , )
        # test whether we get a loss as a scalar
        loss = outputs.loss
        expected_shape = (2,)
        self.assertEqual(loss.shape , expected_shape )
        # test the shape of the logits
        logits = outputs.logits
        expected_shape = (2, 2)
        self.assertEqual(logits.shape , expected_shape )
@slow
def lowerCAmelCase_ ( self : Optional[int] ):
"""simple docstring"""
        model = TFLayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased" , num_labels=13 )
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(
            input_ids=input_ids , bbox=bbox , attention_mask=attention_mask , token_type_ids=token_type_ids , labels=labels )
        # test the shape of the logits
        logits = outputs.logits
        expected_shape = tf.convert_to_tensor((2, 25, 13) )
        self.assertEqual(logits.shape , expected_shape )
@slow
def lowerCAmelCase_ ( self : List[str] ):
"""simple docstring"""
        model = TFLayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased" )
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(input_ids=input_ids , bbox=bbox , attention_mask=attention_mask , token_type_ids=token_type_ids )
        # test the shape of the logits
        expected_shape = tf.convert_to_tensor((2, 25) )
        self.assertEqual(outputs.start_logits.shape , expected_shape )
        self.assertEqual(outputs.end_logits.shape , expected_shape )
| 418 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = '''cuda''' if torch.cuda.is_available() else '''cpu'''
def split_text(text: str, n=100, character=" ") -> List[str]:
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0 , len(text) , n)]
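# Illustrative behaviour of split_text with a small chunk size:
#   split_text("a b c d", n=2)  # -> ["a b", "c d"]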
def split_documents(documents: dict) -> dict:
    titles, texts = [], []
    for title, text in zip(documents['''title'''] , documents['''text''']):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else '''''')
                texts.append(passage)
    return {"title": titles, "text": texts}
def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    input_ids = ctx_tokenizer(
        documents['''title'''] , documents['''text'''] , truncation=True , padding='''longest''' , return_tensors='''pt''')['''input_ids''']
    embeddings = ctx_encoder(input_ids.to(device=device) , return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def lowerCamelCase (a_ :"RagExampleArguments" , a_ :"ProcessingArguments" , a_ :"IndexHnswArguments" , ) -> Any:
######################################
logger.info('''Step 1 - Create the dataset''')
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
lowercase :List[Any] = load_dataset(
'''csv''' , data_files=[rag_example_args.csv_path] , split='''train''' , delimiter='''\t''' , column_names=['''title''', '''text'''])
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
lowercase :Optional[Any] = dataset.map(a_ , batched=a_ , num_proc=processing_args.num_proc)
# And compute the embeddings
lowercase :str = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=a_)
lowercase :Dict = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
lowercase :str = Features(
{'''text''': Value('''string'''), '''title''': Value('''string'''), '''embeddings''': Sequence(Value('''float32'''))}) # optional, save as float32 instead of float64 to save space
lowercase :Optional[Any] = dataset.map(
partial(a_ , ctx_encoder=a_ , ctx_tokenizer=a_) , batched=a_ , batch_size=processing_args.batch_size , features=a_ , )
# And finally save your dataset
lowercase :str = os.path.join(rag_example_args.output_dir , '''my_knowledge_dataset''')
dataset.save_to_disk(a_)
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info('''Step 2 - Index the dataset''')
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
lowercase :str = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT)
dataset.add_faiss_index('''embeddings''' , custom_index=a_)
# And save the index
lowercase :Optional[Any] = os.path.join(rag_example_args.output_dir , '''my_knowledge_dataset_hnsw_index.faiss''')
dataset.get_index('''embeddings''').save(a_)
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv") , metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"} , )
    question: Optional[str] = field(
        default=None , metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."} , )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq" , metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"} , )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base" , metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        } , )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb") , metadata={"help": "Path to a directory where the dataset passages and the index will be saved"} , )
@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None , metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        } , )
    batch_size: int = field(
        default=16 , metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        } , )
@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768 , metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."} , )
    m: int = field(
        default=128 , metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        } , )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
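# Example invocation (a sketch; the script name and paths are illustrative placeholders):
#   python use_own_knowledge_dataset.py \
#       --csv_path path/to/my_csv.csv \
#       --output_dir path/to/my_knowledge_dataset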
| 677 | 0 |
'''simple docstring'''
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    '''simple docstring'''
    headers = {'''Content-Type''': '''application/json'''}
    response = requests.post(slack_url , json={'text': message_body} , headers=headers )
    if response.status_code != 200:
        error_message = (
            '''Request to slack returned an error '''
            f'''{response.status_code}, the response is:\n{response.text}'''
        )
        raise ValueError(error_message )
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
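    # Illustrative: the webhook receives a plain JSON body, e.g. {"text": "deploy finished"},
    # so any incoming-webhook URL created at the address above can be passed as slack_url.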
| 120 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
'''configuration_longt5''': ['''LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LongT5Config''', '''LongT5OnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_longt5'''] = [
'''LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LongT5EncoderModel''',
'''LongT5ForConditionalGeneration''',
'''LongT5Model''',
'''LongT5PreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_longt5'''] = [
'''FlaxLongT5ForConditionalGeneration''',
'''FlaxLongT5Model''',
'''FlaxLongT5PreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
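# Illustrative effect of the lazy pattern above: importing the package stays cheap, and
# the heavy torch/flax modules are only imported on first attribute access, e.g.
#   from transformers.models.longt5 import LongT5Model  # triggers the real import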
| 677 | 0 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ClapProcessor( ProcessorMixin ):
    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")
    def __init__( self , feature_extractor , tokenizer ):
        """simple docstring"""
        super().__init__(feature_extractor , tokenizer )
    def __call__( self , text=None , audios=None , return_tensors=None , **kwargs ):
        """simple docstring"""
        sampling_rate = kwargs.pop('''sampling_rate''' , None )
        if text is None and audios is None:
            raise ValueError('''You have to specify either text or audios. Both cannot be none.''' )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if audios is not None:
            audio_features = self.feature_extractor(
                audios , sampling_rate=sampling_rate , return_tensors=return_tensors , **kwargs )
        if text is not None and audios is not None:
            encoding['''input_features'''] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features ) , tensor_type=return_tensors )
    def batch_decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.decode(*args , **kwargs )
@property
    def model_input_names( self ):
"""simple docstring"""
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
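# Illustrative usage sketch (the checkpoint name is an example; any CLAP checkpoint
# that ships a processor works the same way):
#   processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
#   inputs = processor(text=["a sound of a cat"], audios=audio_array,
#                      sampling_rate=48_000, return_tensors="pt")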
| 0 |
"""simple docstring"""
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
UpperCAmelCase = logging.get_logger(__name__)
class Conversation:
    def __init__( self , text: str = None , conversation_id: uuid.UUID = None , past_user_inputs=None , generated_responses=None ):
'''simple docstring'''
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []
        self.uuid: uuid.UUID = conversation_id
        self.past_user_inputs: List[str] = past_user_inputs
        self.generated_responses: List[str] = generated_responses
        self.new_user_input: Optional[str] = text
    def __eq__( self , other ):
        '''simple docstring'''
        if not isinstance(other , Conversation ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
    def add_user_input( self , text: str , overwrite: bool = False ):
'''simple docstring'''
if self.new_user_input:
if overwrite:
logger.warning(
f"""User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten """
f"""with: \"{text}\".""" )
                self.new_user_input = text
else:
logger.warning(
f"""User input added while unprocessed input was existing: \"{self.new_user_input}\" new input """
f"""ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input""" )
else:
            self.new_user_input = text
    def mark_processed( self ):
'''simple docstring'''
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
            self.new_user_input = None
    def append_response( self , response: str ):
        '''simple docstring'''
        self.generated_responses.append(response )
    def iter_texts( self ):
'''simple docstring'''
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self : Dict ):
'''simple docstring'''
        output = f"""Conversation id: {self.uuid} \n"""
for is_user, text in self.iter_texts():
            name = '''user''' if is_user else '''bot'''
output += f"""{name} >> {text} \n"""
return output
@add_end_docstrings(
__UpperCAmelCase , R"\n min_length_for_response (`int`, *optional*, defaults to 32):\n The minimum length (in number of tokens) for a response.\n minimum_tokens (`int`, *optional*, defaults to 10):\n The minimum length of tokens to leave for a response.\n " , )
class ConversationalPipeline( Pipeline ):
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        super().__init__(*args , **kwargs )
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token
    def _sanitize_parameters( self , min_length_for_response=None , minimum_tokens=None , clean_up_tokenization_spaces=None , **generate_kwargs ):
        '''simple docstring'''
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}
        if min_length_for_response is not None:
            preprocess_params['''min_length_for_response'''] = min_length_for_response
        if minimum_tokens is not None:
            forward_params['''minimum_tokens'''] = minimum_tokens
        if "max_length" in generate_kwargs:
            forward_params['''max_length'''] = generate_kwargs['''max_length''']
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params['''clean_up_tokenization_spaces'''] = clean_up_tokenization_spaces
        if generate_kwargs:
            forward_params.update(generate_kwargs )
        return preprocess_params, forward_params, postprocess_params
    def __call__( self , conversations: Union[Conversation, List[Conversation]] , num_workers=0 , **kwargs ):
        '''simple docstring'''
        outputs = super().__call__(conversations , num_workers=num_workers , **kwargs )
        if isinstance(outputs , list ) and len(outputs ) == 1:
            return outputs[0]
        return outputs
    def preprocess( self , conversation: Conversation , min_length_for_response=32 ):
        '''simple docstring'''
        if not isinstance(conversation , Conversation ):
            raise ValueError('''ConversationalPipeline, expects Conversation as inputs''' )
        if conversation.new_user_input is None:
            raise ValueError(
                f"""Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. """
                '''Add user inputs with the conversation\'s `add_user_input` method''' )
        if hasattr(self.tokenizer , '''_build_conversation_input_ids''' ):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation )
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation )
        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids] )
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids] )
        return {"input_ids": input_ids, "conversation": conversation}
    def _forward( self , model_inputs , minimum_tokens=10 , **generate_kwargs ):
'''simple docstring'''
        max_length = generate_kwargs.get('''max_length''' , self.model.config.max_length )
        n = model_inputs['''input_ids'''].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f"""Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})""" )
            trim = max_length - minimum_tokens
            model_inputs['''input_ids'''] = model_inputs['''input_ids'''][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs['''attention_mask'''] = model_inputs['''attention_mask'''][:, -trim:]
        conversation = model_inputs.pop('''conversation''' )
        generate_kwargs['''max_length'''] = max_length
        output_ids = self.model.generate(**model_inputs , **generate_kwargs )
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
    def postprocess( self , model_outputs , clean_up_tokenization_spaces=True ):
'''simple docstring'''
        output_ids = model_outputs['''output_ids''']
        answer = self.tokenizer.decode(
            output_ids[0] , skip_special_tokens=True , clean_up_tokenization_spaces=clean_up_tokenization_spaces , )
        conversation = model_outputs['''conversation''']
        conversation.mark_processed()
        conversation.append_response(answer )
        return conversation
    def _legacy_parse_and_tokenize( self , conversation: Conversation ):
'''simple docstring'''
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text , add_special_tokens=False ) + [eos_token_id] )
            else:
                input_ids.extend(self.tokenizer.encode(text , add_special_tokens=False ) )
        if len(input_ids ) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
return input_ids
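# Illustrative usage sketch (the model name is an example; any conversational checkpoint works):
#   from transformers import pipeline
#   chatbot = pipeline("conversational", model="facebook/blenderbot-400M-distill")
#   conversation = Conversation("What is the best way to learn Python?")
#   conversation = chatbot(conversation)
#   print(conversation.generated_responses[-1])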
| 677 | 0 |
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None
log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}
_default_log_level = logging.WARNING
_tqdm_active = True
def _get_default_logging_level():
    """simple docstring"""
    env_level_str = os.getenv("TRANSFORMERS_VERBOSITY" , None )
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"""Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, """
                f"""has to be one of: { ', '.join(log_levels.keys() ) }""" )
    return _default_log_level
def _get_library_name() -> str:
    """simple docstring"""
    return __name__.split("." )[0]
def _get_library_root_logger() -> logging.Logger:
    """simple docstring"""
    return logging.getLogger(_get_library_name() )
def _configure_library_root_logger() -> None:
    """simple docstring"""
    global _default_handler
    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush
        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler )
        library_root_logger.setLevel(_get_default_logging_level() )
        library_root_logger.propagate = False
def _reset_library_root_logger() -> None:
    """simple docstring"""
    global _default_handler
    with _lock:
        if not _default_handler:
            return
        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler )
        library_root_logger.setLevel(logging.NOTSET )
        _default_handler = None
def get_log_levels_dict():
    """simple docstring"""
    return log_levels
def get_logger(name: Optional[str] = None ) -> logging.Logger:
    """simple docstring"""
    if name is None:
        name = _get_library_name()
    _configure_library_root_logger()
    return logging.getLogger(name )
def get_verbosity() -> int:
    """simple docstring"""
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()
def set_verbosity(verbosity: int ) -> None:
    """simple docstring"""
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity )
def set_verbosity_info():
    """simple docstring"""
    return set_verbosity(INFO )
def set_verbosity_warning():
    """simple docstring"""
    return set_verbosity(WARNING )
def set_verbosity_debug():
    """simple docstring"""
    return set_verbosity(DEBUG )
def set_verbosity_error():
    """simple docstring"""
    return set_verbosity(ERROR )
def disable_default_handler() -> None:
    """simple docstring"""
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler )
def enable_default_handler() -> None:
    """simple docstring"""
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler )
def add_handler(handler: logging.Handler ) -> None:
    """simple docstring"""
    _configure_library_root_logger()
    assert handler is not None
    _get_library_root_logger().addHandler(handler )
def remove_handler(handler: logging.Handler ) -> None:
    """simple docstring"""
    _configure_library_root_logger()
    assert handler is not None and handler not in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler )
def disable_propagation() -> None:
    """simple docstring"""
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False
def enable_propagation() -> None:
    """simple docstring"""
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True
def enable_explicit_format() -> None:
    """simple docstring"""
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s" )
        handler.setFormatter(formatter )
def reset_format() -> None:
    """simple docstring"""
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        handler.setFormatter(None )
def warning_advice(self , *args , **kwargs ):
    """simple docstring"""
    no_advisory_warnings = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS" , False )
    if no_advisory_warnings:
        return
    self.warning(*args , **kwargs )
logging.Logger.warning_advice = warning_advice
@functools.lru_cache(None )
def warning_once(self , *args , **kwargs ):
    """simple docstring"""
    self.warning(*args , **kwargs )
logging.Logger.warning_once = warning_once
class EmptyTqdm:
'''simple docstring'''
    def __init__( self , *args , **kwargs ):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None
def __iter__( self ):
return iter(self._iterator )
    def __getattr__( self , _ ):
        def empty_fn(*args , **kwargs ):  # pylint: disable=unused-argument
            return
        return empty_fn
def __enter__( self ):
return self
def __exit__( self , _snake_case , _snake_case , _snake_case ):
return
class _tqdm_cls:
'''simple docstring'''
    def __call__( self , *args , **kwargs ):
        if _tqdm_active:
            return tqdm_lib.tqdm(*args , **kwargs )
        else:
            return EmptyTqdm(*args , **kwargs )
    def set_lock( self , *args , **kwargs ):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args , **kwargs )
    def get_lock( self ):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()
tqdm = _tqdm_cls()
def is_progress_bar_enabled() -> bool:
    """simple docstring"""
    global _tqdm_active
    return bool(_tqdm_active )
def enable_progress_bars() -> None:
    """simple docstring"""
    global _tqdm_active
    _tqdm_active = True
    hf_hub_utils.enable_progress_bars()
def disable_progress_bars() -> None:
    """simple docstring"""
    global _tqdm_active
    _tqdm_active = False
    hf_hub_utils.disable_progress_bars()
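# Illustrative: besides the helpers above, the default level can be driven from the
# environment variable read near the top of this module, e.g.
#   TRANSFORMERS_VERBOSITY=error python my_script.py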
| 424 |
"""simple docstring"""
def solution(n: int = 100) -> int:
    collect_powers = set()
    current_pow = 0
    n = n + 1  # maximum limit
    for a in range(2 , n):
        for b in range(2 , n):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow)  # adds the result to the set
    return len(collect_powers)
if __name__ == "__main__":
print('''Number of terms ''', solution(int(str(input()).strip())))
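    # Illustrative check: for n = 5 the distinct values of a**b with 2 <= a, b <= 5 are
    # {4, 8, 9, 16, 25, 27, 32, 64, 81, 125, 243, 256, 625, 1024, 3125}, so solution(5) == 15
    # (16 appears twice, as 2**4 and 4**2, but is only counted once).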
| 677 | 0 |
'''simple docstring'''
class SubArray:
    def __init__( self , arr ) -> None:
        '''simple docstring'''
        self.array = arr.split("," )
    def solve_sub_array( self ) -> int:
        '''simple docstring'''
        sum_value = [int(self.array[0] )] * len(self.array )
        rear = [int(self.array[0] )] * len(self.array )
        for i in range(1 , len(self.array ) ):
            sum_value[i] = max(
                int(self.array[i] ) + sum_value[i - 1] , int(self.array[i] ) )
            rear[i] = max(sum_value[i] , rear[i - 1] )
        return rear[len(self.array ) - 1]
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : Dict = input("please input some numbers:")
_SCREAMING_SNAKE_CASE : Optional[Any] = SubArray(whole_array)
_SCREAMING_SNAKE_CASE : str = array.solve_sub_array()
print(("the results is:", re))
| 400 |
"""simple docstring"""
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/xprophetnet-large-wiki100-cased''': (
'''https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json'''
),
}
class XLMProphetNetConfig( PretrainedConfig ):
    model_type = "xlm-prophetnet"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "num_encoder_attention_heads",
    }
    def __init__( self , activation_dropout: Optional[float] = 0.1 , activation_function: Optional[Union[str, Callable]] = "gelu" , vocab_size: Optional[int] = 30522 , hidden_size: Optional[int] = 1024 , encoder_ffn_dim: Optional[int] = 4096 , num_encoder_layers: Optional[int] = 12 , num_encoder_attention_heads: Optional[int] = 16 , decoder_ffn_dim: Optional[int] = 4096 , num_decoder_layers: Optional[int] = 12 , num_decoder_attention_heads: Optional[int] = 16 , attention_dropout: Optional[float] = 0.1 , dropout: Optional[float] = 0.1 , max_position_embeddings: Optional[int] = 512 , init_std: Optional[float] = 0.02 , is_encoder_decoder: Optional[bool] = True , add_cross_attention: Optional[bool] = True , decoder_start_token_id: Optional[int] = 0 , ngram: Optional[int] = 2 , num_buckets: Optional[int] = 32 , relative_max_distance: Optional[int] = 128 , disable_ngram_loss: Optional[bool] = False , eps: Optional[float] = 0.0 , use_cache: Optional[bool] = True , pad_token_id: Optional[int] = 0 , bos_token_id: Optional[int] = 1 , eos_token_id: Optional[int] = 2 , **kwargs , ):
'''simple docstring'''
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_ffn_dim = encoder_ffn_dim
        self.num_encoder_layers = num_encoder_layers
        self.num_encoder_attention_heads = num_encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_decoder_layers = num_decoder_layers
        self.num_decoder_attention_heads = num_decoder_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.init_std = init_std  # Normal(0, this parameter)
        self.activation_function = activation_function
        # parameters for xlmprophetnet
        self.ngram = ngram
        self.num_buckets = num_buckets
        self.relative_max_distance = relative_max_distance
        self.disable_ngram_loss = disable_ngram_loss
        self.eps = eps
        # 3 Types of Dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.dropout = dropout
        self.use_cache = use_cache
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , add_cross_attention=add_cross_attention , decoder_start_token_id=decoder_start_token_id , **kwargs , )
    @property
    def num_hidden_layers( self ) -> int:
        '''simple docstring'''
        return self.num_encoder_layers + self.num_decoder_layers
    @num_hidden_layers.setter
    def num_hidden_layers( self , value ):
        '''simple docstring'''
        raise NotImplementedError(
            '''This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and'''
            ''' `num_decoder_layers`.''' )
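# Illustrative usage (a minimal sketch; the layer counts are arbitrary):
#   config = XLMProphetNetConfig(num_encoder_layers=2, num_decoder_layers=2)
#   assert config.num_hidden_layers == 4  # property defined above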
| 677 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_blenderbot': [
'BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlenderbotConfig',
'BlenderbotOnnxConfig',
],
'tokenization_blenderbot': ['BlenderbotTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_blenderbot_fast'] = ['BlenderbotTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_blenderbot'] = [
'BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlenderbotForCausalLM',
'BlenderbotForConditionalGeneration',
'BlenderbotModel',
'BlenderbotPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_blenderbot'] = [
'TFBlenderbotForConditionalGeneration',
'TFBlenderbotModel',
'TFBlenderbotPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_blenderbot'] = [
'FlaxBlenderbotForConditionalGeneration',
'FlaxBlenderbotModel',
'FlaxBlenderbotPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
lowercase__ =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
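# The file above follows transformers' lazy-import layout: every public name is listed
# in an import structure up front, and the defining submodule is only imported on first
# attribute access. A minimal, self-contained sketch of that idea (hypothetical class,
# not transformers' actual _LazyModule internals):
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # Map each exported symbol to the submodule that defines it.
        self._symbol_to_module = {
            symbol: submodule
            for submodule, symbols in import_structure.items()
            for symbol in symbols
        }
        # Advertise the exported names without importing anything yet.
        self.__all__ = list(self._symbol_to_module)

    def __getattr__(self, attr: str):
        # Only reached when `attr` is not yet cached on the module instance.
        if attr not in self._symbol_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(f"{self.__name__}.{self._symbol_to_module[attr]}")
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value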
| 263 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase = {
'''configuration_bert''': ['''BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BertConfig''', '''BertOnnxConfig'''],
'''tokenization_bert''': ['''BasicTokenizer''', '''BertTokenizer''', '''WordpieceTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = ['''BertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
'''BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BertForMaskedLM''',
'''BertForMultipleChoice''',
'''BertForNextSentencePrediction''',
'''BertForPreTraining''',
'''BertForQuestionAnswering''',
'''BertForSequenceClassification''',
'''BertForTokenClassification''',
'''BertLayer''',
'''BertLMHeadModel''',
'''BertModel''',
'''BertPreTrainedModel''',
'''load_tf_weights_in_bert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
'''TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFBertEmbeddings''',
'''TFBertForMaskedLM''',
'''TFBertForMultipleChoice''',
'''TFBertForNextSentencePrediction''',
'''TFBertForPreTraining''',
'''TFBertForQuestionAnswering''',
'''TFBertForSequenceClassification''',
'''TFBertForTokenClassification''',
'''TFBertLMHeadModel''',
'''TFBertMainLayer''',
'''TFBertModel''',
'''TFBertPreTrainedModel''',
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = ['''TFBertTokenizer''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
'''FlaxBertForCausalLM''',
'''FlaxBertForMaskedLM''',
'''FlaxBertForMultipleChoice''',
'''FlaxBertForNextSentencePrediction''',
'''FlaxBertForPreTraining''',
'''FlaxBertForQuestionAnswering''',
'''FlaxBertForSequenceClassification''',
'''FlaxBertForTokenClassification''',
'''FlaxBertModel''',
'''FlaxBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 677 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_a = {
"""configuration_electra""": ["""ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ElectraConfig""", """ElectraOnnxConfig"""],
"""tokenization_electra""": ["""ElectraTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = ["""ElectraTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = [
"""ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ElectraForCausalLM""",
"""ElectraForMaskedLM""",
"""ElectraForMultipleChoice""",
"""ElectraForPreTraining""",
"""ElectraForQuestionAnswering""",
"""ElectraForSequenceClassification""",
"""ElectraForTokenClassification""",
"""ElectraModel""",
"""ElectraPreTrainedModel""",
"""load_tf_weights_in_electra""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = [
"""TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFElectraForMaskedLM""",
"""TFElectraForMultipleChoice""",
"""TFElectraForPreTraining""",
"""TFElectraForQuestionAnswering""",
"""TFElectraForSequenceClassification""",
"""TFElectraForTokenClassification""",
"""TFElectraModel""",
"""TFElectraPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = [
"""FlaxElectraForCausalLM""",
"""FlaxElectraForMaskedLM""",
"""FlaxElectraForMultipleChoice""",
"""FlaxElectraForPreTraining""",
"""FlaxElectraForQuestionAnswering""",
"""FlaxElectraForSequenceClassification""",
"""FlaxElectraForTokenClassification""",
"""FlaxElectraModel""",
"""FlaxElectraPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
_a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 19 |
"""simple docstring"""
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __magic_name__ ( __UpperCAmelCase ):
__A : Tuple = ["image_processor", "tokenizer"]
__A : Dict = "BlipImageProcessor"
__A : Dict = "AutoTokenizer"
def __init__( self : Any , snake_case__ : Union[str, Any] , snake_case__ : str ):
'''simple docstring'''
lowercase :Dict = False
super().__init__(snake_case__ , snake_case__ )
lowercase :Union[str, Any] = self.image_processor
def __call__( self : Optional[int] , snake_case__ : ImageInput = None , snake_case__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , snake_case__ : bool = True , snake_case__ : Union[bool, str, PaddingStrategy] = False , snake_case__ : Union[bool, str, TruncationStrategy] = None , snake_case__ : Optional[int] = None , snake_case__ : int = 0 , snake_case__ : Optional[int] = None , snake_case__ : Optional[bool] = None , snake_case__ : bool = False , snake_case__ : bool = False , snake_case__ : bool = False , snake_case__ : bool = False , snake_case__ : bool = False , snake_case__ : bool = True , snake_case__ : Optional[Union[str, TensorType]] = None , **snake_case__ : Optional[Any] , ):
'''simple docstring'''
if images is None and text is None:
raise ValueError('''You have to specify either images or text.''' )
# Get only text
if images is None:
lowercase :List[Any] = self.tokenizer
lowercase :str = self.tokenizer(
text=snake_case__ , add_special_tokens=snake_case__ , padding=snake_case__ , truncation=snake_case__ , max_length=snake_case__ , stride=snake_case__ , pad_to_multiple_of=snake_case__ , return_attention_mask=snake_case__ , return_overflowing_tokens=snake_case__ , return_special_tokens_mask=snake_case__ , return_offsets_mapping=snake_case__ , return_token_type_ids=snake_case__ , return_length=snake_case__ , verbose=snake_case__ , return_tensors=snake_case__ , **snake_case__ , )
return text_encoding
# add pixel_values
lowercase :Union[str, Any] = self.image_processor(snake_case__ , return_tensors=snake_case__ )
if text is not None:
lowercase :int = self.tokenizer(
text=snake_case__ , add_special_tokens=snake_case__ , padding=snake_case__ , truncation=snake_case__ , max_length=snake_case__ , stride=snake_case__ , pad_to_multiple_of=snake_case__ , return_attention_mask=snake_case__ , return_overflowing_tokens=snake_case__ , return_special_tokens_mask=snake_case__ , return_offsets_mapping=snake_case__ , return_token_type_ids=snake_case__ , return_length=snake_case__ , verbose=snake_case__ , return_tensors=snake_case__ , **snake_case__ , )
else:
lowercase :Optional[int] = None
if text_encoding is not None:
encoding_image_processor.update(snake_case__ )
return encoding_image_processor
def __snake_case ( self : Tuple , *snake_case__ : List[Any] , **snake_case__ : Tuple ):
'''simple docstring'''
return self.tokenizer.batch_decode(*snake_case__ , **snake_case__ )
def __snake_case ( self : List[str] , *snake_case__ : Dict , **snake_case__ : List[Any] ):
'''simple docstring'''
return self.tokenizer.decode(*snake_case__ , **snake_case__ )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def __snake_case ( self : List[Any] ):
'''simple docstring'''
lowercase :List[Any] = self.tokenizer.model_input_names
lowercase :List[Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
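# Minimal usage sketch for a processor of this shape (the checkpoint name is an
# assumption; any BLIP-family checkpoint that ships an AutoProcessor behaves the same):
#   from PIL import Image
#   from transformers import AutoProcessor
#   processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
#   inputs = processor(images=Image.open("photo.jpg"), text="a photo of", return_tensors="pt")
#   # -> BatchEncoding with input_ids / attention_mask plus pixel_values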
| 677 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__A = {
"configuration_transfo_xl": ["TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP", "TransfoXLConfig"],
"tokenization_transfo_xl": ["TransfoXLCorpus", "TransfoXLTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
"AdaptiveEmbedding",
"TransfoXLForSequenceClassification",
"TransfoXLLMHeadModel",
"TransfoXLModel",
"TransfoXLPreTrainedModel",
"load_tf_weights_in_transfo_xl",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFAdaptiveEmbedding",
"TFTransfoXLForSequenceClassification",
"TFTransfoXLLMHeadModel",
"TFTransfoXLMainLayer",
"TFTransfoXLModel",
"TFTransfoXLPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 586 |
"""simple docstring"""
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class __magic_name__ ( __UpperCAmelCase ):
@require_torch
def __snake_case ( self : Dict ):
'''simple docstring'''
lowercase :Optional[Any] = '''
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
'''
lowercase :Any = '''
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
'''
lowercase :Tuple = '''
import socket
def offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")
socket.socket = offline_socket
'''
# Force fetching the files so that we can use the cache
lowercase :str = '''hf-internal-testing/tiny-random-bert'''
BertConfig.from_pretrained(snake_case__ )
BertModel.from_pretrained(snake_case__ )
BertTokenizer.from_pretrained(snake_case__ )
pipeline(task='''fill-mask''' , model=snake_case__ )
# baseline - just load from_pretrained with normal network
lowercase :Union[str, Any] = [sys.executable, '''-c''', '''\n'''.join([load, run, mock] )]
# should succeed
lowercase :Any = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
lowercase :List[Any] = '''1'''
lowercase :List[str] = subprocess.run(snake_case__ , env=snake_case__ , check=snake_case__ , capture_output=snake_case__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
def __snake_case ( self : Tuple ):
'''simple docstring'''
lowercase :List[str] = '''
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
'''
lowercase :Dict = '''
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
'''
lowercase :List[Any] = '''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")
socket.socket = offline_socket
'''
# Force fetching the files so that we can use the cache
lowercase :str = '''hf-internal-testing/tiny-random-bert'''
BertConfig.from_pretrained(snake_case__ )
BertModel.from_pretrained(snake_case__ )
BertTokenizer.from_pretrained(snake_case__ )
pipeline(task='''fill-mask''' , model=snake_case__ )
# baseline - just load from_pretrained with normal network
lowercase :List[str] = [sys.executable, '''-c''', '''\n'''.join([load, run, mock] )]
# should succeed
lowercase :str = self.get_env()
lowercase :str = subprocess.run(snake_case__ , env=snake_case__ , check=snake_case__ , capture_output=snake_case__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
def __snake_case ( self : int ):
'''simple docstring'''
lowercase :str = '''
from transformers import BertConfig, BertModel, BertTokenizer
'''
lowercase :Union[str, Any] = '''
mname = "hf-internal-testing/tiny-random-bert-sharded"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print("success")
'''
lowercase :Optional[int] = '''
import socket
def offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")
socket.socket = offline_socket
'''
# baseline - just load from_pretrained with normal network
lowercase :Optional[Any] = [sys.executable, '''-c''', '''\n'''.join([load, run] )]
# should succeed
lowercase :Union[str, Any] = self.get_env()
lowercase :str = subprocess.run(snake_case__ , env=snake_case__ , check=snake_case__ , capture_output=snake_case__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
# next emulate no network
lowercase :Tuple = [sys.executable, '''-c''', '''\n'''.join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
lowercase :Any = '''1'''
lowercase :Optional[Any] = subprocess.run(snake_case__ , env=snake_case__ , check=snake_case__ , capture_output=snake_case__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
def __snake_case ( self : Optional[int] ):
'''simple docstring'''
lowercase :Dict = '''
from transformers import pipeline
'''
lowercase :Optional[Any] = '''
mname = "hf-internal-testing/tiny-random-bert"
pipe = pipeline(model=mname)
'''
lowercase :Dict = '''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")
socket.socket = offline_socket
'''
lowercase :Tuple = self.get_env()
lowercase :Optional[Any] = '''1'''
lowercase :Optional[int] = [sys.executable, '''-c''', '''\n'''.join([load, mock, run] )]
lowercase :str = subprocess.run(snake_case__ , env=snake_case__ , check=snake_case__ , capture_output=snake_case__ )
self.assertEqual(result.returncode , 1 , result.stderr )
self.assertIn(
'''You cannot infer task automatically within `pipeline` when using offline mode''' , result.stderr.decode().replace('''\n''' , '''''' ) , )
@require_torch
def __snake_case ( self : Optional[Any] ):
'''simple docstring'''
lowercase :List[Any] = '''
from transformers import AutoModel
'''
lowercase :Union[str, Any] = '''
mname = "hf-internal-testing/test_dynamic_model"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print("success")
'''
# baseline - just load from_pretrained with normal network
lowercase :Union[str, Any] = [sys.executable, '''-c''', '''\n'''.join([load, run] )]
# should succeed
lowercase :List[str] = self.get_env()
lowercase :Optional[int] = subprocess.run(snake_case__ , env=snake_case__ , check=snake_case__ , capture_output=snake_case__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
lowercase :List[Any] = '''1'''
lowercase :Tuple = subprocess.run(snake_case__ , env=snake_case__ , check=snake_case__ , capture_output=snake_case__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
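# The tests above follow the same basic recipe: warm the cache with real network access,
# monkeypatch socket.socket so any further network call raises, then assert that
# TRANSFORMERS_OFFLINE=1 still lets from_pretrained resolve from the local cache, e.g.:
#   TRANSFORMERS_OFFLINE=1 python -c "from transformers import BertModel; BertModel.from_pretrained('hf-internal-testing/tiny-random-bert')"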
| 677 | 0 |
def factorial( num : int ):
    fact = 1
    for i in range(1 , num + 1 ):
        fact *= i
    return fact
def split_and_add( number : int ):
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10 # Removing the last_digit from the given number
    return sum_of_digits
def solution( num : int = 100 ):
    fact = factorial(num)
    result = split_and_add(fact)
    return result
if __name__ == "__main__":
print(solution(int(input("""Enter the Number: """).strip())))
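# Quick hand-checked sanity values for the helpers above:
#   factorial(10) == 3628800
#   split_and_add(3628800) == 3 + 6 + 2 + 8 + 8 + 0 + 0 == 27
#   solution(10) == 27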
| 204 |
"""simple docstring"""
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase = logging.get_logger()
@dataclass
class __magic_name__ :
__A : nn.Module
__A : List[nn.Module] = field(default_factory=__UpperCAmelCase )
__A : list = field(default_factory=__UpperCAmelCase )
def __snake_case ( self : List[str] , snake_case__ : List[str] , snake_case__ : Tensor , snake_case__ : Tensor ):
'''simple docstring'''
has_not_submodules = len(list(m.modules() ) ) == 1 or isinstance(m , nn.Conv2d ) or isinstance(m , nn.BatchNorm2d )
if has_not_submodules:
self.traced.append(m )
def __call__( self : int , snake_case__ : Tensor ):
'''simple docstring'''
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(snake_case__ )
[x.remove() for x in self.handles]
return self
@property
def __snake_case ( self : int ):
'''simple docstring'''
return list(filter(lambda x : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class __magic_name__ :
__A : nn.Module
__A : nn.Module
__A : int = 0
__A : List = field(default_factory=__UpperCAmelCase )
__A : List = field(default_factory=__UpperCAmelCase )
def __call__( self : Dict , snake_case__ : Tensor ):
'''simple docstring'''
lowercase :Dict = Tracker(self.dest )(snake_case__ ).parametrized
lowercase :Optional[Any] = Tracker(self.src )(snake_case__ ).parametrized
lowercase :List[str] = list(filter(lambda snake_case__ : type(snake_case__ ) not in self.src_skip , snake_case__ ) )
lowercase :Tuple = list(filter(lambda snake_case__ : type(snake_case__ ) not in self.dest_skip , snake_case__ ) )
if len(snake_case__ ) != len(snake_case__ ):
raise Exception(
f"""Numbers of operations are different. Source module has {len(snake_case__ )} operations while"""
f""" destination module has {len(snake_case__ )}.""" )
for dest_m, src_m in zip(snake_case__ , snake_case__ ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(f"""Transfered from={src_m} to={dest_m}""" )
def lowerCamelCase (a_ :str , a_ :ResNetConfig , a_ :Path , a_ :bool = True) -> Optional[Any]:
print(F"""Converting {name}...""")
with torch.no_grad():
lowercase :Union[str, Any] = timm.create_model(a_ , pretrained=a_).eval()
lowercase :Tuple = ResNetForImageClassification(a_).eval()
lowercase :int = ModuleTransfer(src=a_ , dest=a_)
lowercase :List[Any] = torch.randn((1, 3, 224, 224))
module_transfer(a_)
assert torch.allclose(from_model(a_) , our_model(a_).logits), "The model logits don't match the original one."
lowercase :List[Any] = F"""resnet{'-'.join(name.split('resnet'))}"""
print(a_)
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message='''Add model''' , use_temp_dir=a_ , )
# we can use the convnext one
lowercase :Any = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''')
image_processor.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message='''Add image processor''' , use_temp_dir=a_ , )
print(F"""Pushed {checkpoint_name}""")
def lowerCamelCase (a_ :Path , a_ :str = None , a_ :bool = True) -> int:
lowercase :Optional[Any] = '''imagenet-1k-id2label.json'''
lowercase :Union[str, Any] = 1000
lowercase :Any = (1, num_labels)
lowercase :Tuple = '''huggingface/label-files'''
lowercase :List[str] = num_labels
lowercase :Union[str, Any] = json.load(open(hf_hub_download(a_ , a_ , repo_type='''dataset''') , '''r'''))
lowercase :Any = {int(k): v for k, v in idalabel.items()}
lowercase :str = idalabel
lowercase :Any = {v: k for k, v in idalabel.items()}
lowercase :Union[str, Any] = partial(a_ , num_labels=a_ , idalabel=a_ , labelaid=a_)
lowercase :Optional[int] = {
'''resnet18''': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type='''basic'''),
'''resnet26''': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck'''),
'''resnet34''': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type='''basic'''),
'''resnet50''': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck'''),
'''resnet101''': ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck'''),
'''resnet152''': ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck'''),
}
if model_name:
convert_weight_and_push(a_ , names_to_config[model_name] , a_ , a_)
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(a_ , a_ , a_ , a_)
return config, expected_shape
if __name__ == "__main__":
UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help=(
'''The name of the model you wish to convert, it must be one of the supported resnet* architecture,'''
''' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=Path,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
default=True,
type=bool,
required=False,
help='''If True, push model and image processor to the hub.''',
)
UpperCAmelCase = parser.parse_args()
UpperCAmelCase = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
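# Caveat worth knowing about the CLI above: argparse's `type=bool` does not parse
# "False" — bool("False") is True, so any non-empty value enables --push_to_hub.
# A common fix (sketch; the helper name is made up here):
#   def str_to_bool(value: str) -> bool:
#       return value.lower() in ("1", "true", "yes")
#   parser.add_argument("--push_to_hub", default=True, type=str_to_bool)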
| 677 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase = {
"configuration_bert": ["BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BertConfig", "BertOnnxConfig"],
"tokenization_bert": ["BasicTokenizer", "BertTokenizer", "WordpieceTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["BertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BertForMaskedLM",
"BertForMultipleChoice",
"BertForNextSentencePrediction",
"BertForPreTraining",
"BertForQuestionAnswering",
"BertForSequenceClassification",
"BertForTokenClassification",
"BertLayer",
"BertLMHeadModel",
"BertModel",
"BertPreTrainedModel",
"load_tf_weights_in_bert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFBertEmbeddings",
"TFBertForMaskedLM",
"TFBertForMultipleChoice",
"TFBertForNextSentencePrediction",
"TFBertForPreTraining",
"TFBertForQuestionAnswering",
"TFBertForSequenceClassification",
"TFBertForTokenClassification",
"TFBertLMHeadModel",
"TFBertMainLayer",
"TFBertModel",
"TFBertPreTrainedModel",
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["TFBertTokenizer"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"FlaxBertForCausalLM",
"FlaxBertForMaskedLM",
"FlaxBertForMultipleChoice",
"FlaxBertForNextSentencePrediction",
"FlaxBertForPreTraining",
"FlaxBertForQuestionAnswering",
"FlaxBertForSequenceClassification",
"FlaxBertForTokenClassification",
"FlaxBertModel",
"FlaxBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 45 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
__A : Any = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
__A : List[Any] = (
{
"feature-extraction": TFMobileBertModel,
"fill-mask": TFMobileBertForMaskedLM,
"question-answering": TFMobileBertForQuestionAnswering,
"text-classification": TFMobileBertForSequenceClassification,
"token-classification": TFMobileBertForTokenClassification,
"zero-shot": TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
__A : List[str] = False
__A : int = False
def __snake_case ( self : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : Any , snake_case__ : int=False ):
'''simple docstring'''
lowercase :Union[str, Any] = super()._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
if return_labels:
if model_class in get_values(snake_case__ ):
lowercase :Tuple = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
return inputs_dict
class __magic_name__ ( __UpperCAmelCase ):
def __init__( self : Any , snake_case__ : Dict , snake_case__ : Dict=1_3 , snake_case__ : Tuple=7 , snake_case__ : Optional[Any]=True , snake_case__ : Union[str, Any]=True , snake_case__ : str=True , snake_case__ : Optional[Any]=True , snake_case__ : Any=9_9 , snake_case__ : Optional[Any]=3_2 , snake_case__ : Optional[Any]=3_2 , snake_case__ : Any=2 , snake_case__ : Optional[int]=4 , snake_case__ : List[Any]=3_7 , snake_case__ : Optional[int]="gelu" , snake_case__ : List[Any]=0.1 , snake_case__ : str=0.1 , snake_case__ : List[Any]=5_1_2 , snake_case__ : List[str]=1_6 , snake_case__ : Union[str, Any]=2 , snake_case__ : Optional[Any]=0.02 , snake_case__ : Optional[Any]=3 , snake_case__ : Dict=4 , snake_case__ : int=None , ):
'''simple docstring'''
lowercase :Tuple = parent
lowercase :Tuple = batch_size
lowercase :Optional[Any] = seq_length
lowercase :Optional[Any] = is_training
lowercase :Optional[Any] = use_input_mask
lowercase :List[Any] = use_token_type_ids
lowercase :str = use_labels
lowercase :List[str] = vocab_size
lowercase :str = hidden_size
lowercase :Optional[int] = num_hidden_layers
lowercase :Dict = num_attention_heads
lowercase :Any = intermediate_size
lowercase :List[str] = hidden_act
lowercase :Optional[Any] = hidden_dropout_prob
lowercase :List[Any] = attention_probs_dropout_prob
lowercase :List[Any] = max_position_embeddings
lowercase :List[Any] = type_vocab_size
lowercase :Union[str, Any] = type_sequence_label_size
lowercase :Union[str, Any] = initializer_range
lowercase :Any = num_labels
lowercase :int = num_choices
lowercase :Dict = scope
lowercase :Dict = embedding_size
def __snake_case ( self : Tuple ):
'''simple docstring'''
lowercase :Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase :int = None
if self.use_input_mask:
lowercase :int = random_attention_mask([self.batch_size, self.seq_length] )
lowercase :Tuple = None
if self.use_token_type_ids:
lowercase :int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase :Union[str, Any] = None
lowercase :int = None
lowercase :str = None
if self.use_labels:
lowercase :int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase :str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase :Dict = ids_tensor([self.batch_size] , self.num_choices )
lowercase :Optional[int] = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __snake_case ( self : Union[str, Any] , snake_case__ : Tuple , snake_case__ : str , snake_case__ : Tuple , snake_case__ : str , snake_case__ : Any , snake_case__ : Tuple , snake_case__ : Tuple ):
'''simple docstring'''
lowercase :Dict = TFMobileBertModel(config=snake_case__ )
lowercase :Union[str, Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowercase :List[Any] = model(snake_case__ )
lowercase :Optional[int] = [input_ids, input_mask]
lowercase :Optional[int] = model(snake_case__ )
lowercase :Union[str, Any] = model(snake_case__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __snake_case ( self : List[Any] , snake_case__ : Dict , snake_case__ : Optional[int] , snake_case__ : int , snake_case__ : str , snake_case__ : Dict , snake_case__ : Tuple , snake_case__ : Optional[int] ):
'''simple docstring'''
lowercase :Any = TFMobileBertForMaskedLM(config=snake_case__ )
lowercase :Any = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowercase :int = model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __snake_case ( self : Tuple , snake_case__ : Any , snake_case__ : Optional[Any] , snake_case__ : List[Any] , snake_case__ : Dict , snake_case__ : str , snake_case__ : List[Any] , snake_case__ : Tuple ):
'''simple docstring'''
lowercase :Optional[Any] = TFMobileBertForNextSentencePrediction(config=snake_case__ )
lowercase :Tuple = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowercase :Optional[Any] = model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def __snake_case ( self : Dict , snake_case__ : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : int , snake_case__ : Union[str, Any] , snake_case__ : Dict ):
'''simple docstring'''
lowercase :int = TFMobileBertForPreTraining(config=snake_case__ )
lowercase :Any = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowercase :List[Any] = model(snake_case__ )
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def __snake_case ( self : Optional[Any] , snake_case__ : Tuple , snake_case__ : Tuple , snake_case__ : str , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : Dict , snake_case__ : Optional[Any] ):
'''simple docstring'''
lowercase :List[Any] = self.num_labels
lowercase :List[Any] = TFMobileBertForSequenceClassification(config=snake_case__ )
lowercase :Dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowercase :List[Any] = model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __snake_case ( self : Any , snake_case__ : Optional[int] , snake_case__ : Tuple , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : List[Any] , snake_case__ : List[Any] , snake_case__ : Optional[Any] ):
'''simple docstring'''
lowercase :Tuple = self.num_choices
lowercase :Any = TFMobileBertForMultipleChoice(config=snake_case__ )
lowercase :Any = tf.tile(tf.expand_dims(snake_case__ , 1 ) , (1, self.num_choices, 1) )
lowercase :Union[str, Any] = tf.tile(tf.expand_dims(snake_case__ , 1 ) , (1, self.num_choices, 1) )
lowercase :List[Any] = tf.tile(tf.expand_dims(snake_case__ , 1 ) , (1, self.num_choices, 1) )
lowercase :Dict = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
lowercase :Optional[Any] = model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __snake_case ( self : Any , snake_case__ : str , snake_case__ : Tuple , snake_case__ : int , snake_case__ : Tuple , snake_case__ : Dict , snake_case__ : Optional[Any] , snake_case__ : Dict ):
'''simple docstring'''
lowercase :List[Any] = self.num_labels
lowercase :List[str] = TFMobileBertForTokenClassification(config=snake_case__ )
lowercase :int = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowercase :int = model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __snake_case ( self : List[str] , snake_case__ : List[Any] , snake_case__ : Optional[int] , snake_case__ : Optional[int] , snake_case__ : List[Any] , snake_case__ : Dict , snake_case__ : Dict , snake_case__ : str ):
'''simple docstring'''
lowercase :Union[str, Any] = TFMobileBertForQuestionAnswering(config=snake_case__ )
lowercase :List[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowercase :str = model(snake_case__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __snake_case ( self : List[Any] ):
'''simple docstring'''
lowercase :Dict = self.prepare_config_and_inputs()
lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase = config_and_inputs # config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
lowercase :Optional[Any] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
def __snake_case ( self : Optional[Any] ):
'''simple docstring'''
lowercase :List[Any] = TFMobileBertModelTest.TFMobileBertModelTester(self )
lowercase :List[str] = ConfigTester(self , config_class=snake_case__ , hidden_size=3_7 )
def __snake_case ( self : List[Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __snake_case ( self : Union[str, Any] ):
'''simple docstring'''
lowercase :List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*snake_case__ )
def __snake_case ( self : Any ):
'''simple docstring'''
lowercase :List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*snake_case__ )
def __snake_case ( self : Optional[Any] ):
'''simple docstring'''
lowercase :str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*snake_case__ )
def __snake_case ( self : Union[str, Any] ):
'''simple docstring'''
lowercase :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*snake_case__ )
def __snake_case ( self : Optional[int] ):
'''simple docstring'''
lowercase :int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*snake_case__ )
def __snake_case ( self : List[str] ):
'''simple docstring'''
lowercase :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*snake_case__ )
def __snake_case ( self : List[str] ):
'''simple docstring'''
lowercase :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*snake_case__ )
def __snake_case ( self : Dict ):
'''simple docstring'''
lowercase :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*snake_case__ )
@slow
def __snake_case ( self : int ):
'''simple docstring'''
for model_name in ["google/mobilebert-uncased"]:
lowercase :List[str] = TFMobileBertModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
@require_tf
class __magic_name__ ( unittest.TestCase ):
@slow
def __snake_case ( self : Tuple ):
'''simple docstring'''
lowercase :int = TFMobileBertForPreTraining.from_pretrained('''google/mobilebert-uncased''' )
lowercase :Optional[Any] = tf.constant([[0, 1, 2, 3, 4, 5]] )
lowercase :List[Any] = model(snake_case__ )[0]
lowercase :Union[str, Any] = [1, 6, 3_0_5_2_2]
self.assertEqual(output.shape , snake_case__ )
lowercase :Optional[int] = tf.constant(
[
[
[-4.5_91_95_47, -9.24_82_95, -9.64_52_56],
[-6.7_30_61_75, -6.44_02_84, -6.6_05_28_37],
[-7.2_74_35_06, -6.7_84_79_15, -6.02_46_73],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , snake_case__ , atol=1e-4 )
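# The integration check above deliberately compares only a 3x3 corner of the logits
# (output[:, :3, :3]) against reference values recorded from the released checkpoint;
# atol=1e-4 keeps the assertion robust to small numerical drift across TF versions.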
| 677 | 0 |
"""simple docstring"""
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
lowercase_ = 50_00_00
lowercase_ , lowercase_ = os.path.split(__file__)
lowercase_ = os.path.join(RESULTS_BASEPATH, 'results', RESULTS_FILENAME.replace('.py', '.json'))
@get_duration
def UpperCAmelCase ( _lowercase : datasets.Dataset , **_lowercase : Tuple ) -> Tuple:
"""simple docstring"""
lowerCAmelCase_ = dataset.map(**a_ )
@get_duration
def UpperCAmelCase ( _lowercase : datasets.Dataset , **_lowercase : int ) -> str:
"""simple docstring"""
lowerCAmelCase_ = dataset.filter(**a_ )
def UpperCAmelCase ( ) -> Any:
"""simple docstring"""
lowerCAmelCase_ = {'''num examples''': SPEED_TEST_N_EXAMPLES}
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase_ = datasets.Features({'''text''': datasets.Value('''string''' ), '''numbers''': datasets.Value('''float32''' )} )
lowerCAmelCase_ = generate_example_dataset(
os.path.join(a_ , '''dataset.arrow''' ) , a_ , num_examples=a_ )
lowerCAmelCase_ = transformers.AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=a_ )
def tokenize(examples):
return tokenizer(examples['''text'''] )
lowerCAmelCase_ = map(a_ )
lowerCAmelCase_ = map(a_ , batched=a_ )
lowerCAmelCase_ = map(a_ , function=lambda _lowercase : None , batched=a_ )
with dataset.formatted_as(type='''numpy''' ):
lowerCAmelCase_ = map(a_ , function=lambda _lowercase : None , batched=a_ )
with dataset.formatted_as(type='''pandas''' ):
lowerCAmelCase_ = map(a_ , function=lambda _lowercase : None , batched=a_ )
with dataset.formatted_as(type='''torch''' , columns='''numbers''' ):
lowerCAmelCase_ = map(a_ , function=lambda _lowercase : None , batched=a_ )
with dataset.formatted_as(type='''tensorflow''' , columns='''numbers''' ):
lowerCAmelCase_ = map(a_ , function=lambda _lowercase : None , batched=a_ )
lowerCAmelCase_ = map(a_ , function=a_ , batched=a_ )
lowerCAmelCase_ = filter(a_ )
# Activate later when the tokenizer supports batched inputs
# with dataset.formatted_as(type='numpy'):
# times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
with open(a_ , '''wb''' ) as f:
f.write(json.dumps(a_ ).encode('''utf-8''' ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
| 552 |
"""simple docstring"""
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def lowerCamelCase (a_ :int) -> List[str]:
random.seed(a_)
np.random.seed(a_)
torch.manual_seed(a_)
torch.cuda.manual_seed_all(a_)
# ^^ safe to call this function even if cuda is not available
class __magic_name__ :
def __init__( self : Optional[Any] , snake_case__ : Iterable[torch.nn.Parameter] , snake_case__ : float = 0.99_99 , snake_case__ : float = 0.0 , snake_case__ : int = 0 , snake_case__ : bool = False , snake_case__ : Union[float, int] = 1.0 , snake_case__ : Union[float, int] = 2 / 3 , snake_case__ : Optional[Any] = None , snake_case__ : Dict[str, Any] = None , **snake_case__ : Tuple , ):
'''simple docstring'''
if isinstance(snake_case__ , torch.nn.Module ):
lowercase :int = (
'''Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. '''
'''Please pass the parameters of the module instead.'''
)
deprecate(
'''passing a `torch.nn.Module` to `ExponentialMovingAverage`''' , '''1.0.0''' , snake_case__ , standard_warn=snake_case__ , )
lowercase :Dict = parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
lowercase :Optional[Any] = True
if kwargs.get('''max_value''' , snake_case__ ) is not None:
lowercase :Optional[Any] = '''The `max_value` argument is deprecated. Please use `decay` instead.'''
deprecate('''max_value''' , '''1.0.0''' , snake_case__ , standard_warn=snake_case__ )
lowercase :Optional[int] = kwargs['''max_value''']
if kwargs.get('''min_value''' , snake_case__ ) is not None:
lowercase :List[Any] = '''The `min_value` argument is deprecated. Please use `min_decay` instead.'''
deprecate('''min_value''' , '''1.0.0''' , snake_case__ , standard_warn=snake_case__ )
lowercase :str = kwargs['''min_value''']
lowercase :Any = list(snake_case__ )
lowercase :Optional[Any] = [p.clone().detach() for p in parameters]
if kwargs.get('''device''' , snake_case__ ) is not None:
lowercase :str = '''The `device` argument is deprecated. Please use `to` instead.'''
deprecate('''device''' , '''1.0.0''' , snake_case__ , standard_warn=snake_case__ )
self.to(device=kwargs['''device'''] )
lowercase :int = None
lowercase :int = decay
lowercase :Union[str, Any] = min_decay
lowercase :List[Any] = update_after_step
lowercase :Union[str, Any] = use_ema_warmup
lowercase :Any = inv_gamma
lowercase :Any = power
lowercase :str = 0
lowercase :int = None # set in `step()`
lowercase :List[str] = model_cls
lowercase :Any = model_config
@classmethod
def __snake_case ( cls : int , snake_case__ : Tuple , snake_case__ : Union[str, Any] ):
'''simple docstring'''
lowercase , lowercase = model_cls.load_config(snake_case__ , return_unused_kwargs=snake_case__ )
lowercase :List[Any] = model_cls.from_pretrained(snake_case__ )
lowercase :Optional[int] = cls(model.parameters() , model_cls=snake_case__ , model_config=model.config )
ema_model.load_state_dict(snake_case__ )
return ema_model
def __snake_case ( self : int , snake_case__ : Union[str, Any] ):
'''simple docstring'''
if self.model_cls is None:
raise ValueError('''`save_pretrained` can only be used if `model_cls` was defined at __init__.''' )
if self.model_config is None:
raise ValueError('''`save_pretrained` can only be used if `model_config` was defined at __init__.''' )
lowercase :Dict = self.model_cls.from_config(self.model_config )
lowercase :Tuple = self.state_dict()
state_dict.pop('''shadow_params''' , snake_case__ )
model.register_to_config(**snake_case__ )
self.copy_to(model.parameters() )
model.save_pretrained(snake_case__ )
def __snake_case ( self : int , snake_case__ : int ):
'''simple docstring'''
lowercase :Union[str, Any] = max(0 , optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
if self.use_ema_warmup:
lowercase :int = 1 - (1 + step / self.inv_gamma) ** -self.power
else:
lowercase :Dict = (1 + step) / (1_0 + step)
lowercase :Optional[int] = min(snake_case__ , self.decay )
# make sure decay is not smaller than min_decay
lowercase :Optional[int] = max(snake_case__ , self.min_decay )
return cur_decay_value
@torch.no_grad()
def __snake_case ( self : Any , snake_case__ : Iterable[torch.nn.Parameter] ):
'''simple docstring'''
if isinstance(snake_case__ , torch.nn.Module ):
lowercase :Tuple = (
'''Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. '''
'''Please pass the parameters of the module instead.'''
)
deprecate(
'''passing a `torch.nn.Module` to `ExponentialMovingAverage.step`''' , '''1.0.0''' , snake_case__ , standard_warn=snake_case__ , )
lowercase :Union[str, Any] = parameters.parameters()
lowercase :Optional[Any] = list(snake_case__ )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
lowercase :List[Any] = self.get_decay(self.optimization_step )
lowercase :Optional[Any] = decay
lowercase :List[Any] = 1 - decay
lowercase :List[str] = contextlib.nullcontext
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params , snake_case__ ):
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
lowercase :Union[str, Any] = deepspeed.zero.GatheredParameters(snake_case__ , modifier_rank=snake_case__ )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(snake_case__ )
def __snake_case ( self : str , snake_case__ : Iterable[torch.nn.Parameter] ):
'''simple docstring'''
lowercase :Optional[Any] = list(snake_case__ )
for s_param, param in zip(self.shadow_params , snake_case__ ):
param.data.copy_(s_param.to(param.device ).data )
def __snake_case ( self : Optional[int] , snake_case__ : Dict=None , snake_case__ : Dict=None ):
'''simple docstring'''
lowercase :str = [
p.to(device=snake_case__ , dtype=snake_case__ ) if p.is_floating_point() else p.to(device=snake_case__ )
for p in self.shadow_params
]
def __snake_case ( self : Dict ):
'''simple docstring'''
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def __snake_case ( self : Optional[int] , snake_case__ : Iterable[torch.nn.Parameter] ):
'''simple docstring'''
lowercase :str = [param.detach().cpu().clone() for param in parameters]
def __snake_case ( self : List[Any] , snake_case__ : Iterable[torch.nn.Parameter] ):
'''simple docstring'''
if self.temp_stored_params is None:
raise RuntimeError('''This ExponentialMovingAverage has no `store()`ed weights ''' '''to `restore()`''' )
for c_param, param in zip(self.temp_stored_params , snake_case__ ):
param.data.copy_(c_param.data )
# Better memory-wise.
lowercase :Dict = None
    def load_state_dict( self , state_dict : dict ):
        '''simple docstring'''
        state_dict = copy.deepcopy(state_dict )
        self.decay = state_dict.get('''decay''' , self.decay )
        if self.decay < 0.0 or self.decay > 1.0:
            raise ValueError('''Decay must be between 0 and 1''' )
        self.min_decay = state_dict.get('''min_decay''' , self.min_decay )
        if not isinstance(self.min_decay , float ):
            raise ValueError('''Invalid min_decay''' )
        self.optimization_step = state_dict.get('''optimization_step''' , self.optimization_step )
        if not isinstance(self.optimization_step , int ):
            raise ValueError('''Invalid optimization_step''' )
        self.update_after_step = state_dict.get('''update_after_step''' , self.update_after_step )
        if not isinstance(self.update_after_step , int ):
            raise ValueError('''Invalid update_after_step''' )
        self.use_ema_warmup = state_dict.get('''use_ema_warmup''' , self.use_ema_warmup )
        if not isinstance(self.use_ema_warmup , bool ):
            raise ValueError('''Invalid use_ema_warmup''' )
        self.inv_gamma = state_dict.get('''inv_gamma''' , self.inv_gamma )
        if not isinstance(self.inv_gamma , (float, int) ):
            raise ValueError('''Invalid inv_gamma''' )
        self.power = state_dict.get('''power''' , self.power )
        if not isinstance(self.power , (float, int) ):
            raise ValueError('''Invalid power''' )
        shadow_params = state_dict.get('''shadow_params''' , None )
        if shadow_params is not None:
            self.shadow_params = shadow_params
            if not isinstance(self.shadow_params , list ):
                raise ValueError('''shadow_params must be a list''' )
            if not all(isinstance(p , torch.Tensor ) for p in self.shadow_params ):
                raise ValueError('''shadow_params must all be Tensors''' )
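# Minimal usage sketch for the EMA helper above (assuming this class is diffusers'
# `EMAModel`; `model`, `optimizer` and `dataloader` are placeholders, not defined here):
#
#   ema = EMAModel(model.parameters(), decay=0.9999)
#   for batch in dataloader:
#       loss = model(**batch).loss
#       loss.backward()
#       optimizer.step()
#       optimizer.zero_grad()
#       ema.step(model.parameters())      # update the shadow (EMA) weights
#   ema.store(model.parameters())         # stash the raw weights
#   ema.copy_to(model.parameters())       # evaluate with EMA weights
#   ema.restore(model.parameters())       # put the raw weights back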
| 677 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/nllb-large-en-ro': 10_24,
'facebook/nllb-200-distilled-600M': 10_24,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
class NllbTokenizerFast( PreTrainedTokenizerFast ):
"""simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__( self , vocab_file=None , tokenizer_file=None , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , src_lang=None , tgt_lang=None , additional_special_tokens=None , legacy_behaviour=False , **kwargs , ):
        """simple docstring"""
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file , tokenizer_file=tokenizer_file , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , src_lang=src_lang , tgt_lang=tgt_lang , additional_special_tokens=additional_special_tokens , legacy_behaviour=legacy_behaviour , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens] )
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens} )
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code ) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else '''eng_Latn'''
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang )
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang )
@property
    def src_lang( self ) -> str:
        """simple docstring"""
        return self._src_lang
@src_lang.setter
    def src_lang( self , new_src_lang : str ) -> None:
        """simple docstring"""
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )
    def build_inputs_with_special_tokens( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ):
        """simple docstring"""
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def create_token_type_ids_from_sequences( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def _build_translation_inputs( self , raw_inputs , return_tensors : str , src_lang : Optional[str] , tgt_lang : Optional[str] , **extra_kwargs ):
        """simple docstring"""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
        self.src_lang = src_lang
        inputs = self(raw_inputs , add_special_tokens=True , return_tensors=return_tensors , **extra_kwargs )
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang )
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
    def prepare_seq2seq_batch( self , src_texts : List[str] , src_lang : str = "eng_Latn" , tgt_texts : Optional[List[str]] = None , tgt_lang : str = "fra_Latn" , **kwargs , ):
        """simple docstring"""
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts , tgt_texts , **kwargs )
    def _switch_to_input_mode( self ):
        """simple docstring"""
        return self.set_src_lang_special_tokens(self.src_lang )
    def _switch_to_target_mode( self ):
        """simple docstring"""
        return self.set_tgt_lang_special_tokens(self.tgt_lang )
    def set_src_lang_special_tokens( self , src_lang ):
        """simple docstring"""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang )
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens )
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens )
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
    def set_tgt_lang_special_tokens( self , lang ):
        """simple docstring"""
        self.cur_lang_code = self.convert_tokens_to_ids(lang )
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens )
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens )
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ):
        """simple docstring"""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer." )
        if not os.path.isdir(save_directory ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory.""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
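# Minimal usage sketch (real checkpoint name; the class above corresponds to
# transformers' `NllbTokenizerFast`):
#
#   from transformers import NllbTokenizerFast
#   tok = NllbTokenizerFast.from_pretrained("facebook/nllb-200-distilled-600M", src_lang="eng_Latn")
#   batch = tok("Hello, world!", return_tensors="pt")
#   # For generation into French: forced_bos_token_id=tok.lang_code_to_id["fra_Latn"]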
| 418 |
"""simple docstring"""
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def tax_relpos_bias_lookup(params , i , prefix ):
return params[F"""{prefix}/{prefix}/relpos_bias/rel_embedding"""][:, i, :]
def tax_attention_lookup(params , i , prefix , layer_name="attention" ):
lowercase :Tuple = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/key/kernel"""][:, i, :, :])
lowercase :int = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2])
lowercase :str = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/out/kernel"""][:, i, :, :])
lowercase :Any = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2])
lowercase :int = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/query/kernel"""][:, i, :, :])
lowercase :List[str] = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2])
lowercase :List[Any] = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/value/kernel"""][:, i, :, :])
lowercase :Optional[int] = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2])
return k, o, q, v
def tax_mlp_lookup(params , i , prefix , split_mlp_wi=False ):
if split_mlp_wi:
lowercase :List[Any] = params[F"""{prefix}/{prefix}/mlp/wi_0/kernel"""][:, i, :]
lowercase :Optional[int] = params[F"""{prefix}/{prefix}/mlp/wi_1/kernel"""][:, i, :]
lowercase :Dict = (wi_a, wi_a)
else:
lowercase :Optional[Any] = params[F"""{prefix}/{prefix}/mlp/wi/kernel"""][:, i, :]
lowercase :Union[str, Any] = params[F"""{prefix}/{prefix}/mlp/wo/kernel"""][:, i, :]
return wi, wo
def tax_layer_norm_lookup(params , i , prefix , layer_name ):
return params[F"""{prefix}/{prefix}/{layer_name}/scale"""][:, i]
def convert_tax_to_pytorch(variables , *, num_layers , is_encoder_only , scalable_attention=False ):
    old = traverse_util.flatten_dict(variables['''target'''])
    old = {'''/'''.join(k ): v for k, v in old.items()}
    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = '''encoder/encoder/mlp/wi_0/kernel''' in old
    print('''Split MLP:''' , split_mlp_wi)
    new = collections.OrderedDict()
    # Shared embeddings.
    new['''shared.weight'''] = old['''token_embedder/embedding''']
# Encoder.
    for i in range(num_layers):
# Block i, layer 0 (Self Attention).
lowercase :Union[str, Any] = tax_layer_norm_lookup(a_ , a_ , '''encoder''' , '''pre_attention_layer_norm''')
lowercase , lowercase , lowercase , lowercase :Tuple = tax_attention_lookup(a_ , a_ , '''encoder''' , '''attention''')
lowercase :Dict = layer_norm
lowercase :Dict = k.T
lowercase :Union[str, Any] = o.T
lowercase :List[Any] = q.T
lowercase :int = v.T
# Block i, layer 1 (MLP).
lowercase :Optional[int] = tax_layer_norm_lookup(a_ , a_ , '''encoder''' , '''pre_mlp_layer_norm''')
lowercase , lowercase :str = tax_mlp_lookup(a_ , a_ , '''encoder''' , a_)
lowercase :int = layer_norm
if split_mlp_wi:
lowercase :Tuple = wi[0].T
lowercase :Tuple = wi[1].T
else:
lowercase :int = wi.T
lowercase :Tuple = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
lowercase :Dict = tax_relpos_bias_lookup(
a_ , a_ , '''encoder''').T
lowercase :str = old['''encoder/encoder_norm/scale''']
if not scalable_attention:
lowercase :str = tax_relpos_bias_lookup(
a_ , 0 , '''encoder''').T
lowercase :List[Any] = tax_relpos_bias_lookup(
a_ , 0 , '''decoder''').T
if not is_encoder_only:
# Decoder.
        for i in range(num_layers):
# Block i, layer 0 (Self Attention).
lowercase :Any = tax_layer_norm_lookup(a_ , a_ , '''decoder''' , '''pre_self_attention_layer_norm''')
lowercase , lowercase , lowercase , lowercase :str = tax_attention_lookup(a_ , a_ , '''decoder''' , '''self_attention''')
lowercase :List[str] = layer_norm
lowercase :Dict = k.T
lowercase :List[Any] = o.T
lowercase :List[Any] = q.T
lowercase :Any = v.T
# Block i, layer 1 (Cross Attention).
lowercase :Tuple = tax_layer_norm_lookup(a_ , a_ , '''decoder''' , '''pre_cross_attention_layer_norm''')
lowercase , lowercase , lowercase , lowercase :int = tax_attention_lookup(a_ , a_ , '''decoder''' , '''encoder_decoder_attention''')
lowercase :int = layer_norm
lowercase :Dict = k.T
lowercase :int = o.T
lowercase :List[Any] = q.T
lowercase :Tuple = v.T
# Block i, layer 2 (MLP).
lowercase :Any = tax_layer_norm_lookup(a_ , a_ , '''decoder''' , '''pre_mlp_layer_norm''')
lowercase , lowercase :Tuple = tax_mlp_lookup(a_ , a_ , '''decoder''' , a_)
lowercase :Any = layer_norm
if split_mlp_wi:
lowercase :int = wi[0].T
lowercase :Union[str, Any] = wi[1].T
else:
lowercase :int = wi.T
lowercase :List[Any] = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
lowercase :Union[str, Any] = tax_relpos_bias_lookup(a_ , a_ , '''decoder''').T
lowercase :Union[str, Any] = old['''decoder/decoder_norm/scale''']
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
lowercase :int = old['''decoder/logits_dense/kernel'''].T
return new
def make_state_dict(converted_params , is_encoder_only ):
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])
    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict['''encoder.embed_tokens.weight'''] = state_dict['''shared.weight''']
    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict['''decoder.embed_tokens.weight'''] = state_dict['''shared.weight''']
        if "lm_head.weight" not in state_dict: # For old 1.0 models.
            print('''Using shared word embeddings as lm_head.''')
            state_dict['''lm_head.weight'''] = state_dict['''shared.weight''']
    return state_dict
def load_tax_weights_in_ta(model , config , tax_checkpoint_path , is_encoder_only , scalable_attention ):
    variables = checkpoints.load_t5x_checkpoint(tax_checkpoint_path)
    converted = convert_tax_to_pytorch(
        variables , num_layers=config.num_layers , is_encoder_only=is_encoder_only , scalable_attention=scalable_attention)
    state_dict = make_state_dict(converted , is_encoder_only)
    model.load_state_dict(state_dict , strict=True)
def convert_tax_checkpoint_to_pytorch(tax_checkpoint_path , config_file , pytorch_dump_path , is_encoder_only = False , scalable_attention = False , ):
    config = MT5Config.from_json_file(config_file)
    print(F"""Building PyTorch model from configuration: {config}""")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMT5EncoderModel(config)
    else:
        model = UMT5ForConditionalGeneration(config)
    # Load weights from tf checkpoint
    load_tax_weights_in_ta(model , config , tax_checkpoint_path , is_encoder_only , scalable_attention)
    # Save pytorch-model
    print(F"""Save PyTorch model to {pytorch_dump_path}""")
    model.save_pretrained(pytorch_dump_path)
    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print('''Done''')
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
)
parser.add_argument(
'''--scalable_attention''',
action='''store_true''',
help='''Whether the model uses scaled attention (umt5 model)''',
default=False,
)
    args = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
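# Example invocation (script name and paths are placeholders):
#
#   python convert_umt5_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint_1000000 \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/pytorch_dump \
#       --scalable_attention   # only for UMT5-style scaled attention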
| 677 | 0 |
'''simple docstring'''
UNIT_SYMBOL = {
"meter": "m",
"kilometer": "km",
"megametre": "Mm",
"gigametre": "Gm",
"terametre": "Tm",
"petametre": "Pm",
"exametre": "Em",
"zettametre": "Zm",
"yottametre": "Ym",
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
"m": 0,
"km": 3,
"Mm": 6,
"Gm": 9,
"Tm": 12,
"Pm": 15,
"Em": 18,
"Zm": 21,
"Ym": 24,
}
def length_conversion(from_type: str, to_type: str, value: float) -> float:
    """Convert a length between the metric units defined above."""
    from_sanitized = from_type.lower().strip('s' )
    to_sanitized = to_type.lower().strip('s' )
    from_sanitized = UNIT_SYMBOL.get(from_sanitized , from_sanitized )
    to_sanitized = UNIT_SYMBOL.get(to_sanitized , to_sanitized )
    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f'''Invalid 'from_type' value: {from_type!r}.\n'''
            f'''Conversion abbreviations are: {", ".join(METRIC_CONVERSION )}'''
        )
        raise ValueError(msg )
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f'''Invalid 'to_type' value: {to_type!r}.\n'''
            f'''Conversion abbreviations are: {", ".join(METRIC_CONVERSION )}'''
        )
        raise ValueError(msg )
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)
    return value * pow(1_0 , exponent )
if __name__ == "__main__":
from doctest import testmod
testmod()
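# Worked examples for `length_conversion` above:
#
#   >>> length_conversion("meter", "kilometer", 4)
#   0.004
#   >>> length_conversion("kilometers", "meters", 1.5)   # trailing "s" is stripped
#   1500.0
#   >>> length_conversion("km", "m", 2)                  # lowercase symbols work directly
#   2000.0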
| 120 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_blenderbot''': [
'''BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlenderbotConfig''',
'''BlenderbotOnnxConfig''',
],
'''tokenization_blenderbot''': ['''BlenderbotTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_blenderbot_fast'''] = ['''BlenderbotTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_blenderbot'''] = [
'''BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlenderbotForCausalLM''',
'''BlenderbotForConditionalGeneration''',
'''BlenderbotModel''',
'''BlenderbotPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_blenderbot'''] = [
'''TFBlenderbotForConditionalGeneration''',
'''TFBlenderbotModel''',
'''TFBlenderbotPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_blenderbot'''] = [
'''FlaxBlenderbotForConditionalGeneration''',
'''FlaxBlenderbotModel''',
'''FlaxBlenderbotPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
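# For illustration only: a stripped-down sketch of the lazy-import pattern that
# `_LazyModule` implements above (not transformers' actual implementation).
import importlib
import types

class _LazyModuleSketch(types.ModuleType):
    """Defer importing submodules until one of their attributes is first accessed."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map every exported name to the submodule that defines it.
        self._name_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._name_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f"{self.__name__}.{self._name_to_module[attr]}")
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so __getattr__ is only hit once per name
        return value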
| 677 | 0 |
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=1_8 , min_resolution=3_0 , max_resolution=4_0_0 , do_resize=True , size=None , do_center_crop=True , crop_size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , do_reduce_labels=False , ):
        """simple docstring"""
        size = size if size is not None else {'''height''': 2_0, '''width''': 2_0}
        crop_size = crop_size if crop_size is not None else {'''height''': 1_8, '''width''': 1_8}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels
    def prepare_image_processor_dict( self ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def prepare_semantic_single_inputs():
    """simple docstring"""
    dataset = load_dataset('''hf-internal-testing/fixtures_ade20k''', split='''test''' )
    image = Image.open(dataset[0]['''file'''] )
    map = Image.open(dataset[1]['''file'''] )
    return image, map
def prepare_semantic_batch_inputs():
    """simple docstring"""
    ds = load_dataset('''hf-internal-testing/fixtures_ade20k''', split='''test''' )
    image1 = Image.open(ds[0]['''file'''] )
    image2 = Image.open(ds[1]['''file'''] )
    map1 = Image.open(ds[2]['''file'''] )
    map2 = Image.open(ds[3]['''file'''] )
    return [image1, image2], [map1, map2]
@require_torch
@require_vision
class BeitImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = BeitImageProcessor if is_vision_available() else None
    def setUp( self ):
"""simple docstring"""
        self.image_processor_tester = BeitImageProcessingTester(self )
@property
    def image_processor_dict( self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case__ , '''do_resize''' ) )
self.assertTrue(hasattr(snake_case__ , '''size''' ) )
self.assertTrue(hasattr(snake_case__ , '''do_center_crop''' ) )
self.assertTrue(hasattr(snake_case__ , '''center_crop''' ) )
self.assertTrue(hasattr(snake_case__ , '''do_normalize''' ) )
self.assertTrue(hasattr(snake_case__ , '''image_mean''' ) )
self.assertTrue(hasattr(snake_case__ , '''image_std''' ) )
    def test_image_processor_from_dict_with_kwargs( self ):
"""simple docstring"""
__magic_name__ :str = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 2_0, '''width''': 2_0} )
self.assertEqual(image_processor.crop_size , {'''height''': 1_8, '''width''': 1_8} )
self.assertEqual(image_processor.do_reduce_labels , snake_case__ )
__magic_name__ :List[str] = self.image_processing_class.from_dict(
self.image_processor_dict , size=4_2 , crop_size=8_4 , reduce_labels=snake_case__ )
self.assertEqual(image_processor.size , {'''height''': 4_2, '''width''': 4_2} )
self.assertEqual(image_processor.crop_size , {'''height''': 8_4, '''width''': 8_4} )
self.assertEqual(image_processor.do_reduce_labels , snake_case__ )
    def test_batch_feature( self ):
"""simple docstring"""
pass
    def test_call_pil( self ):
"""simple docstring"""
__magic_name__ :Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__magic_name__ :int = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , Image.Image )
# Test not batched input
__magic_name__ :Dict = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__magic_name__ :List[Any] = image_processing(snake_case__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
    def test_call_numpy( self ):
"""simple docstring"""
__magic_name__ :Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__magic_name__ :Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , numpify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , np.ndarray )
# Test not batched input
__magic_name__ :Tuple = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__magic_name__ :str = image_processing(snake_case__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
    def test_call_pytorch( self ):
"""simple docstring"""
__magic_name__ :Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__magic_name__ :Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , torchify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , torch.Tensor )
# Test not batched input
__magic_name__ :int = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__magic_name__ :str = image_processing(snake_case__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
    def test_call_segmentation_maps( self ):
"""simple docstring"""
__magic_name__ :Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__magic_name__ :List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , torchify=snake_case__ )
__magic_name__ :Any = []
for image in image_inputs:
self.assertIsInstance(snake_case__ , torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
__magic_name__ :Dict = image_processing(image_inputs[0] , maps[0] , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_5_5 )
# Test batched
__magic_name__ :Tuple = image_processing(snake_case__ , snake_case__ , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_5_5 )
# Test not batched input (PIL images)
__magic_name__ :Optional[Any] = prepare_semantic_single_inputs()
__magic_name__ :Dict = image_processing(snake_case__ , snake_case__ , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_5_5 )
# Test batched input (PIL images)
__magic_name__ :Optional[int] = prepare_semantic_batch_inputs()
__magic_name__ :Optional[int] = image_processing(snake_case__ , snake_case__ , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
2,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_5_5 )
    def test_reduce_labels( self ):
"""simple docstring"""
__magic_name__ :int = self.image_processing_class(**self.image_processor_dict )
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
__magic_name__ :Dict = prepare_semantic_single_inputs()
__magic_name__ :Optional[int] = image_processing(snake_case__ , snake_case__ , return_tensors='''pt''' )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 1_5_0 )
__magic_name__ :int = True
__magic_name__ :Any = image_processing(snake_case__ , snake_case__ , return_tensors='''pt''' )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_5_5 )
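# To run this suite locally (the path is a placeholder for wherever this file lives):
#
#   python -m pytest tests/models/beit/test_image_processing_beit.py -v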
| 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {
'''naver-clova-ix/donut-base''': '''https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json''',
# See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class DonutSwinConfig( PretrainedConfig ):
    model_type = "donut-swin"
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__( self , image_size=2_2_4 , patch_size=4 , num_channels=3 , embed_dim=9_6 , depths=[2, 2, 6, 2] , num_heads=[3, 6, 1_2, 2_4] , window_size=7 , mlp_ratio=4.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , initializer_range=0.02 , layer_norm_eps=1e-5 , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
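# Minimal usage sketch (real transformers API; values mirror the defaults above):
#
#   from transformers import DonutSwinConfig, DonutSwinModel
#   config = DonutSwinConfig(embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24])
#   model = DonutSwinModel(config)
#   # hidden_size is derived as embed_dim * 2 ** (len(depths) - 1) = 96 * 8 = 768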
| 677 | 0 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
snake_case = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ImageToTextPipeline( Pipeline ):
'''simple docstring'''
    def __init__( self , *args , **kwargs ):
        super().__init__(*args , **kwargs )
requires_backends(self , "vision" )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING )
    def _sanitize_parameters( self , max_new_tokens=None , generate_kwargs=None , prompt=None ):
        forward_kwargs = {}
        preprocess_params = {}
        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,"
                    " please use only one" )
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}
    def __call__( self , images , **kwargs ):
        return super().__call__(images , **kwargs )
    def preprocess( self , image , prompt=None ):
        image = load_image(image )
        if prompt is not None:
            if not isinstance(prompt , str ):
                raise ValueError(
                    F"""Received an invalid text input, got - {type(prompt )} - but expected a single string. """
                    "Note also that one single text can be provided for conditional image to text generation." )
            model_type = self.model.config.model_type
            if model_type == "git":
                model_inputs = self.image_processor(images=image , return_tensors=self.framework )
                input_ids = self.tokenizer(text=prompt , add_special_tokens=False ).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids ).unsqueeze(0 )
                model_inputs.update({"input_ids": input_ids} )
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image , header_text=prompt , return_tensors=self.framework )
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image , return_tensors=self.framework )
                text_inputs = self.tokenizer(prompt , return_tensors=self.framework )
                model_inputs.update(text_inputs )
            else:
                raise ValueError(F"""Model type {model_type} does not support conditional text generation""" )
        else:
            model_inputs = self.image_processor(images=image , return_tensors=self.framework )
        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None
        return model_inputs
    def _forward( self , model_inputs , generate_kwargs=None ):
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"] , list )
            and all(x is None for x in model_inputs["input_ids"] )
        ):
            model_inputs["input_ids"] = None
        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name )
        model_outputs = self.model.generate(inputs , **model_inputs , **generate_kwargs )
        return model_outputs
    def postprocess( self , model_outputs ):
        records = []
        for output_ids in model_outputs:
            record = {
                '''generated_text''': self.tokenizer.decode(
                    output_ids , skip_special_tokens=True , )
            }
            records.append(record )
        return records
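# Minimal usage sketch (real `transformers` API; the image path is a placeholder):
#
#   from transformers import pipeline
#   captioner = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base")
#   captioner("path/to/image.png")
#   # -> [{'generated_text': '...'}]
#   # Conditional generation (model permitting): captioner("path/to/image.png", prompt="a photo of")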
| 424 |
"""simple docstring"""
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse('''1.11''')
def onnx_export(model , model_args :tuple , output_path :Path , ordered_input_names , output_names , dynamic_axes , opset , use_external_data_format=False , ):
    output_path.parent.mkdir(parents=True , exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model , model_args , f=output_path.as_posix() , input_names=ordered_input_names , output_names=output_names , dynamic_axes=dynamic_axes , do_constant_folding=True , use_external_data_format=use_external_data_format , enable_onnx_checker=True , opset_version=opset , )
    else:
        export(
            model , model_args , f=output_path.as_posix() , input_names=ordered_input_names , output_names=output_names , dynamic_axes=dynamic_axes , do_constant_folding=True , opset_version=opset , )
@torch.no_grad()
def convert_models(model_path :str , output_path :str , opset :int , fp16 :bool = False ):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = '''cuda'''
    elif fp16 and not torch.cuda.is_available():
        raise ValueError('''`float16` model export is only supported on GPUs with CUDA''')
    else:
        device = '''cpu'''
    pipeline = StableDiffusionPipeline.from_pretrained(model_path , torch_dtype=dtype).to(device)
    output_path = Path(output_path)
# TEXT ENCODER
lowercase :List[Any] = pipeline.text_encoder.config.max_position_embeddings
lowercase :Dict = pipeline.text_encoder.config.hidden_size
    text_input = pipeline.tokenizer(
'''A sample prompt''' , padding='''max_length''' , max_length=pipeline.tokenizer.model_max_length , truncation=a_ , return_tensors='''pt''' , )
onnx_export(
pipeline.text_encoder , model_args=(text_input.input_ids.to(device=a_ , dtype=torch.intaa)) , output_path=output_path / '''text_encoder''' / '''model.onnx''' , ordered_input_names=['''input_ids'''] , output_names=['''last_hidden_state''', '''pooler_output'''] , dynamic_axes={
'''input_ids''': {0: '''batch''', 1: '''sequence'''},
} , opset=a_ , )
del pipeline.text_encoder
# UNET
lowercase :Any = pipeline.unet.config.in_channels
lowercase :List[Any] = pipeline.unet.config.sample_size
    unet_path = output_path / '''unet''' / '''model.onnx'''
onnx_export(
pipeline.unet , model_args=(
torch.randn(2 , a_ , a_ , a_).to(device=a_ , dtype=a_),
torch.randn(2).to(device=a_ , dtype=a_),
torch.randn(2 , a_ , a_).to(device=a_ , dtype=a_),
False,
) , output_path=a_ , ordered_input_names=['''sample''', '''timestep''', '''encoder_hidden_states''', '''return_dict'''] , output_names=['''out_sample'''] , dynamic_axes={
'''sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
'''timestep''': {0: '''batch'''},
'''encoder_hidden_states''': {0: '''batch''', 1: '''sequence'''},
} , opset=a_ , use_external_data_format=a_ , )
    unet_model_path = str(unet_path.absolute().as_posix())
    unet_dir = os.path.dirname(unet_model_path)
    unet = onnx.load(unet_model_path)
    # clean up existing tensor files
    shutil.rmtree(unet_dir)
    os.mkdir(unet_dir)
    # collate external tensor files into one
    onnx.save_model(
        unet , unet_model_path , save_as_external_data=True , all_tensors_to_one_file=True , location='''weights.pb''' , convert_attribute=False , )
del pipeline.unet
# VAE ENCODER
    vae_encoder = pipeline.vae
lowercase :Optional[Any] = vae_encoder.config.in_channels
lowercase :Any = vae_encoder.config.sample_size
    # need to get the raw tensor output (sample) from the encoder
    vae_encoder.forward = lambda sample, return_dict: vae_encoder.encode(sample , return_dict )[0].sample()
onnx_export(
a_ , model_args=(
torch.randn(1 , a_ , a_ , a_).to(device=a_ , dtype=a_),
False,
) , output_path=output_path / '''vae_encoder''' / '''model.onnx''' , ordered_input_names=['''sample''', '''return_dict'''] , output_names=['''latent_sample'''] , dynamic_axes={
'''sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
} , opset=a_ , )
# VAE DECODER
    vae_decoder = pipeline.vae
lowercase :Dict = vae_decoder.config.latent_channels
lowercase :Union[str, Any] = vae_decoder.config.out_channels
# forward only through the decoder part
    vae_decoder.forward = vae_encoder.decode
onnx_export(
a_ , model_args=(
torch.randn(1 , a_ , a_ , a_).to(device=a_ , dtype=a_),
False,
) , output_path=output_path / '''vae_decoder''' / '''model.onnx''' , ordered_input_names=['''latent_sample''', '''return_dict'''] , output_names=['''sample'''] , dynamic_axes={
'''latent_sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
} , opset=a_ , )
del pipeline.vae
# SAFETY CHECKER
if pipeline.safety_checker is not None:
        safety_checker = pipeline.safety_checker
lowercase :str = safety_checker.config.vision_config.num_channels
lowercase :str = safety_checker.config.vision_config.image_size
        safety_checker.forward = safety_checker.forward_onnx
onnx_export(
pipeline.safety_checker , model_args=(
torch.randn(
1 , a_ , a_ , a_ , ).to(device=a_ , dtype=a_),
torch.randn(1 , a_ , a_ , a_).to(device=a_ , dtype=a_),
) , output_path=output_path / '''safety_checker''' / '''model.onnx''' , ordered_input_names=['''clip_input''', '''images'''] , output_names=['''out_images''', '''has_nsfw_concepts'''] , dynamic_axes={
'''clip_input''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
'''images''': {0: '''batch''', 1: '''height''', 2: '''width''', 3: '''channels'''},
} , opset=a_ , )
del pipeline.safety_checker
        safety_checker = OnnxRuntimeModel.from_pretrained(output_path / '''safety_checker''')
        feature_extractor = pipeline.feature_extractor
    else:
        safety_checker = None
        feature_extractor = None
    onnx_pipeline = OnnxStableDiffusionPipeline(
        vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / '''vae_encoder''') , vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / '''vae_decoder''') , text_encoder=OnnxRuntimeModel.from_pretrained(output_path / '''text_encoder''') , tokenizer=pipeline.tokenizer , unet=OnnxRuntimeModel.from_pretrained(output_path / '''unet''') , scheduler=pipeline.scheduler , safety_checker=safety_checker , feature_extractor=feature_extractor , requires_safety_checker=safety_checker is not None , )
onnx_pipeline.save_pretrained(a_)
print('''ONNX pipeline saved to''' , a_)
del pipeline
del onnx_pipeline
    _ = OnnxStableDiffusionPipeline.from_pretrained(output_path , provider='''CPUExecutionProvider''')
print('''ONNX pipeline is loadable''')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_path''',
type=str,
required=True,
help='''Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).''',
)
parser.add_argument('''--output_path''', type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--opset''',
default=14,
type=int,
help='''The version of the ONNX operator set to use.''',
)
parser.add_argument('''--fp16''', action='''store_true''', default=False, help='''Export the models in `float16` mode''')
    args = parser.parse_args()
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
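# Example invocation (script name and output path are placeholders):
#
#   python convert_stable_diffusion_checkpoint_to_onnx.py \
#       --model_path runwayml/stable-diffusion-v1-5 \
#       --output_path ./stable_diffusion_onnx \
#       --opset 14 \
#       --fp16   # requires a CUDA GPU, per the check above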
| 677 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"configuration_swiftformer": [
"SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SwiftFormerConfig",
"SwiftFormerOnnxConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swiftformer"] = [
"SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"SwiftFormerForImageClassification",
"SwiftFormerModel",
"SwiftFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
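# Minimal usage sketch (the checkpoint name is the published one, but treat it as an assumption):
#
#   from transformers import AutoImageProcessor, SwiftFormerForImageClassification
#   processor = AutoImageProcessor.from_pretrained("MBZUAI/swiftformer-xs")
#   model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs")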
| 400 |
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer , line , max_length , padding_side , pad_to_max_length=True , return_tensors="pt" ):
    extra_kw = {'''add_prefix_space''': True} if isinstance(tokenizer , BartTokenizer) and not line.startswith(''' ''') else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line] , max_length=max_length , padding='''max_length''' if pad_to_max_length else None , truncation=True , return_tensors=return_tensors , add_special_tokens=True , **extra_kw , )
def trim_batch(input_ids , pad_token_id , attention_mask=None , ):
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class Seq2SeqDataset( Dataset ):
    def __init__( self , tokenizer , data_dir , max_source_length , max_target_length , type_path="train" , n_obs=None , src_lang=None , tgt_lang=None , prefix="" , ):
        '''simple docstring'''
        super().__init__()
        self.src_file = Path(data_dir ).joinpath(type_path + '''.source''' )
        self.tgt_file = Path(data_dir ).joinpath(type_path + '''.target''' )
        self.src_lens = self.get_char_lens(self.src_file )
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens ) > 0, f"""found empty line in {self.src_file}"""
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
def __len__( self : Any ):
'''simple docstring'''
return len(self.src_lens )
    def __getitem__( self , index ):
        '''simple docstring'''
        index = index + 1 # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file ) , index ).rstrip('''\n''' )
        tgt_line = linecache.getline(str(self.tgt_file ) , index ).rstrip('''\n''' )
        assert source_line, f"""empty source line for index {index}"""
        assert tgt_line, f"""empty tgt line for index {index}"""
        # Need to add eos token manually for T5
        if isinstance(self.tokenizer , T5Tokenizer ):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token
        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer , RagTokenizer ) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer , RagTokenizer ) else self.tokenizer
        source_inputs = encode_line(source_tokenizer , source_line , self.max_source_length , '''right''' )
        target_inputs = encode_line(target_tokenizer , tgt_line , self.max_target_length , '''right''' )
        source_ids = source_inputs['''input_ids'''].squeeze()
        target_ids = target_inputs['''input_ids'''].squeeze()
        src_mask = source_inputs['''attention_mask'''].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }
@staticmethod
    def get_char_lens( data_file ):
        '''simple docstring'''
        return [len(x ) for x in Path(data_file ).open().readlines()]
    def collate_fn( self , batch ):
        '''simple docstring'''
        input_ids = torch.stack([x['''input_ids'''] for x in batch] )
        masks = torch.stack([x['''attention_mask'''] for x in batch] )
        target_ids = torch.stack([x['''decoder_input_ids'''] for x in batch] )
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer , RagTokenizer )
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer , RagTokenizer )
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids , tgt_pad_token_id )
        source_ids, source_mask = trim_batch(input_ids , src_pad_token_id , attention_mask=masks )
        batch = {
            '''input_ids''': source_ids,
            '''attention_mask''': source_mask,
            '''decoder_input_ids''': y,
        }
        return batch
logger = getLogger(__name__)
def flatten_list(summary_ids ):
    return list(itertools.chain.from_iterable(summary_ids))
def save_git_info(folder_path ) -> None:
    repo_infos = get_git_info()
    save_json(repo_infos , os.path.join(folder_path , '''git_log.json'''))
def save_json(content , path , indent=4 , **json_dump_kwargs ):
    with open(path , '''w''') as f:
        json.dump(content , f , indent=indent , **json_dump_kwargs)
def load_json(path ):
    with open(path) as f:
        return json.load(f)
def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        '''repo_id''': str(repo),
'''repo_sha''': str(repo.head.object.hexsha),
'''repo_branch''': str(repo.active_branch),
'''hostname''': str(socket.gethostname()),
}
return repo_infos
def lmap(f , x ) -> List:
    return list(map(f , x))
def pickle_save(obj , path ):
    with open(path , '''wb''') as f:
        return pickle.dump(obj , f)
def normalize_answer(s ):
    def remove_articles(text):
        return re.sub(R'''\b(a|an|the)\b''' , ''' ''' , text)
    def white_space_fix(text):
        return " ".join(text.split())
    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)
    def lower(text):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(s))))
def f1_score(prediction , ground_truth ):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
def exact_match_score(prediction , ground_truth ):
    return normalize_answer(prediction) == normalize_answer(ground_truth)
def calculate_exact_match(output_lns , reference_lns ):
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns , reference_lns):
        em += exact_match_score(hypo , pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}
def is_rag_model(model_prefix ):
    return model_prefix.startswith('''rag''')
def set_extra_model_params(extra_params , hparams , config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param['''dropout'''] = '''dropout_rate'''
    for p in extra_params:
        if getattr(hparams , p , None):
            if not hasattr(config , p) and not hasattr(config , equivalent_param[p]):
                logger.info('''config doesn\'t have a `{}` attribute'''.format(p))
                delattr(hparams , p)
                continue
            set_p = p if hasattr(config , p) else equivalent_param[p]
            setattr(config , set_p , getattr(hparams , p))
            delattr(hparams , p)
    return hparams, config
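# Illustration of the remapping performed by set_extra_model_params above: a
# generic `dropout` hparam is copied onto a T5-style config's `dropout_rate`
# and then removed from the hparams. A minimal, self-contained sketch:
from types import SimpleNamespace

hparams = SimpleNamespace(dropout=0.1)
config = SimpleNamespace(dropout_rate=0.0)
equivalent_param = {"dropout": "dropout_rate"}
setattr(config, equivalent_param["dropout"], getattr(hparams, "dropout"))
delattr(hparams, "dropout")
print(config.dropout_rate)  # 0.1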
| 677 | 0 |
'''simple docstring'''
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
lowercase__ =logging.getLogger(__name__)
class a_ ( __UpperCAmelCase ):
def lowerCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=None ):
a_ = self.layer[current_layer](snake_case__ , snake_case__ , head_mask[current_layer] )
a_ = layer_outputs[0]
return hidden_states
@add_start_docstrings(
'The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.' , __UpperCAmelCase , )
class a_ ( __UpperCAmelCase ):
def __init__( self , UpperCAmelCase ):
super().__init__(snake_case__ )
a_ = BertEncoderWithPabee(snake_case__ )
self.init_weights()
a_ = 0
a_ = 0
a_ = 0
a_ = 0
def lowerCAmelCase__ ( self , UpperCAmelCase ):
a_ = threshold
def lowerCAmelCase__ ( self , UpperCAmelCase ):
a_ = patience
def lowerCAmelCase__ ( self ):
a_ = 0
a_ = 0
def lowerCAmelCase__ ( self ):
a_ = self.inference_layers_num / self.inference_instances_num
a_ = (
f'''*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ='''
f''' {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***'''
)
print(snake_case__ )
@add_start_docstrings_to_model_forward(snake_case__ )
def lowerCAmelCase__ ( self , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=False , ):
if input_ids is not None and inputs_embeds is not None:
raise ValueError("""You cannot specify both input_ids and inputs_embeds at the same time""" )
elif input_ids is not None:
a_ = input_ids.size()
elif inputs_embeds is not None:
a_ = inputs_embeds.size()[:-1]
else:
raise ValueError("""You have to specify either input_ids or inputs_embeds""" )
a_ = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
a_ = torch.ones(snake_case__ , device=snake_case__ )
if token_type_ids is None:
a_ = torch.zeros(snake_case__ , dtype=torch.long , device=snake_case__ )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
a_ = self.get_extended_attention_mask(snake_case__ , snake_case__ , snake_case__ )
        # If a 2D or 3D attention mask is provided for the cross-attention,
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
a_ = encoder_hidden_states.size()
a_ = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
a_ = torch.ones(snake_case__ , device=snake_case__ )
a_ = self.invert_attention_mask(snake_case__ )
else:
a_ = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
a_ = self.get_head_mask(snake_case__ , self.config.num_hidden_layers )
a_ = self.embeddings(
input_ids=snake_case__ , position_ids=snake_case__ , token_type_ids=snake_case__ , inputs_embeds=snake_case__ )
a_ = embedding_output
if self.training:
a_ = []
for i in range(self.config.num_hidden_layers ):
a_ = self.encoder.adaptive_forward(
snake_case__ , current_layer=snake_case__ , attention_mask=snake_case__ , head_mask=snake_case__ )
a_ = self.pooler(snake_case__ )
a_ = output_layers[i](output_dropout(snake_case__ ) )
res.append(snake_case__ )
elif self.patience == 0: # Use all layers for inference
a_ = self.encoder(
snake_case__ , attention_mask=snake_case__ , head_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , )
a_ = self.pooler(encoder_outputs[0] )
a_ = [output_layers[self.config.num_hidden_layers - 1](snake_case__ )]
else:
a_ = 0
a_ = None
a_ = 0
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
a_ = self.encoder.adaptive_forward(
snake_case__ , current_layer=snake_case__ , attention_mask=snake_case__ , head_mask=snake_case__ )
a_ = self.pooler(snake_case__ )
a_ = output_layers[i](snake_case__ )
if regression:
a_ = logits.detach()
if patient_result is not None:
a_ = patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
a_ = 0
else:
a_ = logits.detach().argmax(dim=1 )
if patient_result is not None:
a_ = patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(snake_case__ ) ):
patient_counter += 1
else:
a_ = 0
a_ = logits
if patient_counter == self.patience:
break
a_ = [patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
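# The inference path above stops once `patience` consecutive internal
# classifiers agree on the argmax prediction. A minimal sketch of that
# stopping rule (names and values are illustrative):
def exit_layer(per_layer_predictions, patience):
    patient_counter, previous = 0, None
    for layer, pred in enumerate(per_layer_predictions, start=1):
        # Agreement with the previous layer extends the streak; disagreement resets it.
        patient_counter = patient_counter + 1 if pred == previous else 0
        previous = pred
        if patient_counter == patience:
            return layer  # number of layers actually executed
    return len(per_layer_predictions)

print(exit_layer([0, 2, 2, 2, 2, 1], patience=3))  # exits after layer 5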
@add_start_docstrings(
'Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. ' , __UpperCAmelCase , )
class a_ ( __UpperCAmelCase ):
def __init__( self , UpperCAmelCase ):
super().__init__(snake_case__ )
a_ = config.num_labels
a_ = BertModelWithPabee(snake_case__ )
a_ = nn.Dropout(config.hidden_dropout_prob )
a_ = nn.ModuleList(
[nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
self.init_weights()
@add_start_docstrings_to_model_forward(snake_case__ )
def lowerCAmelCase__ ( self , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , ):
a_ = self.bert(
input_ids=snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , position_ids=snake_case__ , head_mask=snake_case__ , inputs_embeds=snake_case__ , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
a_ = (logits[-1],)
if labels is not None:
a_ = None
a_ = 0
for ix, logits_item in enumerate(snake_case__ ):
if self.num_labels == 1:
# We are doing regression
a_ = MSELoss()
a_ = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
else:
a_ = CrossEntropyLoss()
a_ = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
if total_loss is None:
a_ = loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
a_ = (total_loss / total_weights,) + outputs
return outputs
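# Sketch of the PABEE training loss assembled above: each internal classifier's
# loss is weighted by its 1-based layer index, so deeper exits contribute more.
# The per-layer losses below are hypothetical values.
losses = [1.2, 0.9, 0.7, 0.6]
total_loss = sum(loss * (ix + 1) for ix, loss in enumerate(losses))
total_weights = sum(ix + 1 for ix in range(len(losses)))
weighted_loss = total_loss / total_weights  # (1.2 + 1.8 + 2.1 + 2.4) / 10
print(weighted_loss)  # 0.75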
| 263 |
"""simple docstring"""
def move_tower(height :int , from_pole :str , to_pole :str , with_pole :str) -> None:
    if height >= 1:
        move_tower(height - 1 , from_pole , with_pole , to_pole)
        move_disk(from_pole , to_pole)
        move_tower(height - 1 , with_pole , to_pole , from_pole)
def move_disk(from_pole :str , to_pole :str) -> None:
    print('''moving disk from''' , from_pole , '''to''' , to_pole)
def main() -> None:
    height = int(input('''Height of hanoi: ''').strip())
    move_tower(height , '''A''' , '''B''' , '''C''')
if __name__ == "__main__":
    main()
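# Sanity check for the recursion above: moving a tower of height n always takes
# 2**n - 1 disk moves, since each level doubles the work plus one extra move.
def count_moves(height: int) -> int:
    return 0 if height < 1 else 2 * count_moves(height - 1) + 1

assert count_moves(3) == 2**3 - 1  # 7 moves for three disks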
| 677 | 0 |
"""simple docstring"""
from __future__ import annotations
def is_palindrome( number ) -> bool:
    """simple docstring"""
    n = str(number )
    return n == n[::-1]
def solution( limit = 1_00_00_00 ) -> int:
    """simple docstring"""
    total = 0
    for i in range(1, limit ):
        if is_palindrome(i ) and is_palindrome(bin(i ).split('''b''' )[1] ):
            total += i
    return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
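# Worked example for the sum above (Project Euler 36): 585 is palindromic in
# both base 10 and base 2, so it is counted.
n = 585
assert str(n) == str(n)[::-1]          # '585'
assert bin(n)[2:] == bin(n)[2:][::-1]  # '1001001001'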
| 19 |
"""simple docstring"""
from sklearn.metrics import mean_squared_error
import datasets
UpperCAmelCase = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
UpperCAmelCase = '''\
Mean Squared Error (MSE) is the average of the squared differences between the predicted
and actual values.
'''
UpperCAmelCase = '''
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
"raw_values" : Returns a full set of errors in case of multioutput input.
"uniform_average" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric("mse")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{\'mse\': 0.6123724356957945}
If you\'re using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric("mse", "multilist")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{\'mse\': array([0.41666667, 1. ])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __magic_name__ ( datasets.Metric ):
def __snake_case ( self : int ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
'''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html'''
] , )
def __snake_case ( self : Optional[Any] ):
'''simple docstring'''
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value('''float''' ) ),
"references": datasets.Sequence(datasets.Value('''float''' ) ),
}
else:
return {
"predictions": datasets.Value('''float''' ),
"references": datasets.Value('''float''' ),
}
def __snake_case ( self : List[Any] , snake_case__ : str , snake_case__ : int , snake_case__ : str=None , snake_case__ : List[Any]="uniform_average" , snake_case__ : Dict=True ):
'''simple docstring'''
lowercase :Dict = mean_squared_error(
snake_case__ , snake_case__ , sample_weight=snake_case__ , multioutput=snake_case__ , squared=snake_case__ )
return {"mse": mse}
| 677 | 0 |
"""simple docstring"""
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]:
assert isinstance(a_ , a_ )
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]:
lowercase__: Union[str, Any] = tmp_path / '''cache'''
lowercase__: Any = {'''text''': '''string'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowercase__: List[str] = TextDatasetReader(a_ , cache_dir=a_ , keep_in_memory=a_ ).read()
_check_text_dataset(a_ , a_ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''text''': '''string'''},
{'''text''': '''int32'''},
{'''text''': '''float32'''},
] , )
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple:
lowercase__: List[str] = tmp_path / '''cache'''
lowercase__: int = {'''text''': '''string'''}
lowercase__: Optional[int] = features.copy() if features else default_expected_features
lowercase__: int = (
Features({feature: Value(a_ ) for feature, dtype in features.items()} ) if features is not None else None
)
lowercase__: Optional[Any] = TextDatasetReader(a_ , features=a_ , cache_dir=a_ ).read()
_check_text_dataset(a_ , a_ )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> str:
lowercase__: str = tmp_path / '''cache'''
lowercase__: int = {'''text''': '''string'''}
lowercase__: List[Any] = TextDatasetReader(a_ , cache_dir=a_ , split=a_ ).read()
_check_text_dataset(a_ , a_ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]:
if issubclass(a_ , a_ ):
lowercase__: Union[str, Any] = text_path
elif issubclass(a_ , a_ ):
lowercase__: Optional[int] = [text_path]
lowercase__: Optional[Any] = tmp_path / '''cache'''
lowercase__: Any = {'''text''': '''string'''}
lowercase__: Optional[Any] = TextDatasetReader(a_ , cache_dir=a_ ).read()
_check_text_dataset(a_ , a_ )
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=("train",) ) -> int:
assert isinstance(a_ , a_ )
for split in splits:
lowercase__: List[Any] = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Dict:
lowercase__: Tuple = tmp_path / '''cache'''
lowercase__: Dict = {'''text''': '''string'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowercase__: Any = TextDatasetReader({'''train''': text_path} , cache_dir=a_ , keep_in_memory=a_ ).read()
_check_text_datasetdict(a_ , a_ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''text''': '''string'''},
{'''text''': '''int32'''},
{'''text''': '''float32'''},
] , )
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Dict:
lowercase__: Optional[Any] = tmp_path / '''cache'''
# CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
lowercase__: Union[str, Any] = {'''text''': '''string'''}
lowercase__: List[str] = features.copy() if features else default_expected_features
lowercase__: List[str] = (
Features({feature: Value(a_ ) for feature, dtype in features.items()} ) if features is not None else None
)
lowercase__: Optional[Any] = TextDatasetReader({'''train''': text_path} , features=a_ , cache_dir=a_ ).read()
_check_text_datasetdict(a_ , a_ )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]:
if split:
lowercase__: Union[str, Any] = {split: text_path}
else:
lowercase__: Tuple = '''train'''
lowercase__: Tuple = {'''train''': text_path, '''test''': text_path}
lowercase__: Union[str, Any] = tmp_path / '''cache'''
lowercase__: Dict = {'''text''': '''string'''}
lowercase__: Union[str, Any] = TextDatasetReader(a_ , cache_dir=a_ ).read()
_check_text_datasetdict(a_ , a_ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
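# What these tests exercise, seen from the user-facing side: the "text" builder
# wrapped by TextDatasetReader is normally reached through load_dataset. The
# file path below is illustrative.
from datasets import load_dataset

dataset = load_dataset("text", data_files={"train": "my_corpus.txt"})["train"]
print(dataset.column_names)  # ['text'] -- one example per line of the file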
| 586 |
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class __magic_name__ ( __UpperCAmelCase ):
@staticmethod
@abstractmethod
def __snake_case ( snake_case__ : ArgumentParser ):
'''simple docstring'''
raise NotImplementedError()
@abstractmethod
def __snake_case ( self : Optional[Any] ):
'''simple docstring'''
raise NotImplementedError()
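# A hypothetical concrete subclass of the abstract command above, showing how
# the two abstract hooks are typically used: one registers an argparse
# subcommand, the other executes it. Class and command names are illustrative.
from argparse import ArgumentParser

class EnvCommandSketch:
    @staticmethod
    def register_subcommand(subparsers):
        # `subparsers` is the object returned by ArgumentParser.add_subparsers()
        sub = subparsers.add_parser("env", help="Print environment info")
        sub.set_defaults(func=lambda args: EnvCommandSketch().run())

    def run(self):
        import platform
        print(platform.python_version())

parser = ArgumentParser("cli")
EnvCommandSketch.register_subcommand(parser.add_subparsers())
args = parser.parse_args(["env"])
args.func(args)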
| 677 | 0 |
import socket
def main():
    sock = socket.socket(socket.AF_INET , socket.SOCK_STREAM )
    host = socket.gethostname()
    port = 12312
    sock.connect((host, port) )
    sock.send(B"Hello server!" )
    with open("Received_file" , "wb" ) as out_file:
        print("File opened" )
        print("Receiving data..." )
        while True:
            data = sock.recv(1024 )
            if not data:
                break
            out_file.write(data )
    print("Successfully received the file" )
    sock.close()
    print("Connection closed" )
if __name__ == "__main__":
    main()
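# The snippet above is the client half of the transfer; a matching minimal
# server (hypothetical filename, same illustrative port) could look like this:
import socket

def serve_file(filename: str = "File_to_send", port: int = 12312) -> None:
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((socket.gethostname(), port))
    server.listen(1)
    conn, _addr = server.accept()
    conn.recv(1024)  # consume the client's greeting
    with open(filename, "rb") as in_file:
        while chunk := in_file.read(1024):
            conn.send(chunk)
    conn.close()
    server.close()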
| 204 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
UpperCAmelCase = {
'''configuration_encodec''': [
'''ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EncodecConfig''',
],
'''feature_extraction_encodec''': ['''EncodecFeatureExtractor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
'''ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EncodecModel''',
'''EncodecPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 677 | 0 |
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels | 45 |
"""simple docstring"""
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __magic_name__ ( unittest.TestCase ):
def __init__( self : List[Any] , snake_case__ : Optional[int] , snake_case__ : List[str]=3 , snake_case__ : int=3_2 , snake_case__ : int=3 , snake_case__ : str=1_0 , snake_case__ : str=[1_0, 2_0, 3_0, 4_0] , snake_case__ : int=[1, 1, 2, 1] , snake_case__ : List[Any]=True , snake_case__ : Tuple=True , snake_case__ : Optional[Any]="relu" , snake_case__ : Optional[int]=3 , snake_case__ : Optional[Any]=None , ):
'''simple docstring'''
lowercase :Union[str, Any] = parent
lowercase :Optional[Any] = batch_size
lowercase :Dict = image_size
lowercase :Any = num_channels
lowercase :List[str] = embeddings_size
lowercase :Union[str, Any] = hidden_sizes
lowercase :Any = depths
lowercase :Dict = is_training
lowercase :Any = use_labels
lowercase :Any = hidden_act
lowercase :List[str] = num_labels
lowercase :List[Any] = scope
lowercase :int = len(snake_case__ )
def __snake_case ( self : Any ):
'''simple docstring'''
lowercase :Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase :Union[str, Any] = self.get_config()
return config, pixel_values
def __snake_case ( self : Dict ):
'''simple docstring'''
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def __snake_case ( self : str , snake_case__ : Tuple , snake_case__ : List[Any] ):
'''simple docstring'''
lowercase :Any = FlaxRegNetModel(config=snake_case__ )
lowercase :str = model(snake_case__ )
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def __snake_case ( self : List[str] , snake_case__ : Optional[int] , snake_case__ : str ):
'''simple docstring'''
lowercase :Tuple = self.num_labels
lowercase :str = FlaxRegNetForImageClassification(config=snake_case__ )
lowercase :Union[str, Any] = model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __snake_case ( self : str ):
'''simple docstring'''
lowercase :int = self.prepare_config_and_inputs()
lowercase , lowercase :Tuple = config_and_inputs
lowercase :Union[str, Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_flax
class __magic_name__ ( __UpperCAmelCase , unittest.TestCase ):
__A : List[Any] = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
__A : str = False
__A : Tuple = False
__A : Dict = False
def __snake_case ( self : Optional[Any] ):
'''simple docstring'''
lowercase :Dict = FlaxRegNetModelTester(self )
lowercase :Tuple = ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ )
def __snake_case ( self : Union[str, Any] ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __snake_case ( self : List[Any] ):
'''simple docstring'''
return
def __snake_case ( self : str ):
'''simple docstring'''
lowercase :Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def __snake_case ( self : List[str] ):
'''simple docstring'''
lowercase :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case__ )
@unittest.skip(reason='''RegNet does not use inputs_embeds''' )
def __snake_case ( self : Tuple ):
'''simple docstring'''
pass
@unittest.skip(reason='''RegNet does not support input and output embeddings''' )
def __snake_case ( self : List[Any] ):
'''simple docstring'''
pass
def __snake_case ( self : List[Any] ):
'''simple docstring'''
lowercase , lowercase :Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase :Union[str, Any] = model_class(snake_case__ )
lowercase :int = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase :Tuple = [*signature.parameters.keys()]
lowercase :Tuple = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , snake_case__ )
def __snake_case ( self : Tuple ):
'''simple docstring'''
def check_hidden_states_output(snake_case__ : Union[str, Any] , snake_case__ : Optional[Any] , snake_case__ : Optional[int] ):
lowercase :int = model_class(snake_case__ )
lowercase :Tuple = model(**self._prepare_for_class(snake_case__ , snake_case__ ) )
lowercase :Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowercase :Dict = self.model_tester.num_stages
self.assertEqual(len(snake_case__ ) , expected_num_stages + 1 )
lowercase , lowercase :Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase :Optional[int] = True
check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase :str = True
check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ )
def __snake_case ( self : List[Any] ):
'''simple docstring'''
lowercase , lowercase :Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowercase :Optional[Any] = self._prepare_for_class(snake_case__ , snake_case__ )
lowercase :List[Any] = model_class(snake_case__ )
@jax.jit
def model_jitted(snake_case__ : str , **snake_case__ : Optional[int] ):
return model(pixel_values=snake_case__ , **snake_case__ )
with self.subTest('''JIT Enabled''' ):
lowercase :Optional[int] = model_jitted(**snake_case__ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
lowercase :Optional[int] = model_jitted(**snake_case__ ).to_tuple()
self.assertEqual(len(snake_case__ ) , len(snake_case__ ) )
for jitted_output, output in zip(snake_case__ , snake_case__ ):
self.assertEqual(jitted_output.shape , output.shape )
def lowerCamelCase () -> Tuple:
lowercase :Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
return image
@require_flax
class __magic_name__ ( unittest.TestCase ):
@cached_property
def __snake_case ( self : int ):
'''simple docstring'''
return AutoImageProcessor.from_pretrained('''facebook/regnet-y-040''' ) if is_vision_available() else None
@slow
def __snake_case ( self : List[str] ):
'''simple docstring'''
lowercase :int = FlaxRegNetForImageClassification.from_pretrained('''facebook/regnet-y-040''' )
lowercase :Optional[Any] = self.default_image_processor
lowercase :Dict = prepare_img()
lowercase :Any = image_processor(images=snake_case__ , return_tensors='''np''' )
lowercase :List[str] = model(**snake_case__ )
# verify the logits
lowercase :Any = (1, 1_0_0_0)
self.assertEqual(outputs.logits.shape , snake_case__ )
lowercase :List[Any] = jnp.array([-0.41_80, -1.50_51, -3.48_36] )
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , snake_case__ , atol=1e-4 ) )
| 677 | 0 |
"""simple docstring"""
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi( precision : int ) -> str:
    """simple docstring"""
    if not isinstance(precision , int ):
        raise TypeError('''Undefined for non-integers''' )
    elif precision < 1:
        raise ValueError('''Undefined for non-natural numbers''' )
    getcontext().prec = precision
    num_iterations = ceil(precision / 1_4 )
    constant_term = 4_2_6_8_8_0 * Decimal(1_0_0_0_5 ).sqrt()
    exponential_term = 1
    linear_term = 1_3_5_9_1_4_0_9
    partial_sum = Decimal(linear_term )
    for k in range(1 , num_iterations ):
        multinomial_term = factorial(6 * k ) // (factorial(3 * k ) * factorial(k ) ** 3)
        linear_term += 5_4_5_1_4_0_1_3_4
        exponential_term *= -2_6_2_5_3_7_4_1_2_6_4_0_7_6_8_0_0_0
        partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
    return str(constant_term / partial_sum )[:-1]
if __name__ == "__main__":
    n = 50
print(f"""The first {n} digits of pi is: {pi(n)}""") | 552 |
"""simple docstring"""
demo_graph = {
'''A''': ['''B''', '''C''', '''E'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F''', '''G'''],
'''D''': ['''B'''],
'''E''': ['''A''', '''B''', '''D'''],
'''F''': ['''C'''],
'''G''': ['''C'''],
}
def bfs_shortest_path(graph :dict , start , goal) -> list[str]:
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]
    # return path if start is goal
    if start == goal:
        return [start]
    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path
            # mark node as explored
            explored.add(node)
    # in case there's no path between the 2 nodes
    return []
def bfs_shortest_path_distance(graph :dict , start , target) -> int:
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(start)
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target] , dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, '''G''', '''D''')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, '''G''', '''D''')) # returns 4
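# Note on the queues above: list.pop(0) is O(n); collections.deque gives O(1)
# pops from the left. A drop-in variant of the path search, reusing the
# demo_graph defined above:
from collections import deque

def bfs_shortest_path_deque(graph: dict, start, goal) -> list:
    explored, queue = set(), deque([[start]])
    if start == goal:
        return [start]
    while queue:
        path = queue.popleft()
        node = path[-1]
        if node in explored:
            continue
        for neighbour in graph[node]:
            new_path = path + [neighbour]
            if neighbour == goal:
                return new_path
            queue.append(new_path)
        explored.add(node)
    return []

print(bfs_shortest_path_deque(demo_graph, "G", "D"))  # ['G', 'C', 'A', 'B', 'D']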
| 677 | 0 |
'''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE_ ( __A : list[int] ) -> bool:
    return len(set(__A ) ) == len(__A )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 418 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = '''cuda''' if torch.cuda.is_available() else '''cpu'''
def split_text(text :str , n=100 , character=" ") -> List[str]:
    texts = text.split(character)
    return [character.join(texts[i : i + n]).strip() for i in range(0 , len(texts) , n)]
def split_documents(documents :dict) -> dict:
    titles , texts = [], []
    for title, text in zip(documents['''title'''] , documents['''text''']):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else '''''')
                texts.append(passage)
    return {"title": titles, "text": texts}
def embed(documents :dict , ctx_encoder :DPRContextEncoder , ctx_tokenizer :DPRContextEncoderTokenizerFast) -> dict:
    input_ids = ctx_tokenizer(
        documents['''title'''] , documents['''text'''] , truncation=True , padding='''longest''' , return_tensors='''pt''')['''input_ids''']
    embeddings = ctx_encoder(input_ids.to(device=device) , return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def lowerCamelCase (a_ :"RagExampleArguments" , a_ :"ProcessingArguments" , a_ :"IndexHnswArguments" , ) -> Any:
######################################
logger.info('''Step 1 - Create the dataset''')
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
lowercase :List[Any] = load_dataset(
'''csv''' , data_files=[rag_example_args.csv_path] , split='''train''' , delimiter='''\t''' , column_names=['''title''', '''text'''])
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
lowercase :Optional[Any] = dataset.map(a_ , batched=a_ , num_proc=processing_args.num_proc)
# And compute the embeddings
lowercase :str = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=a_)
lowercase :Dict = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
lowercase :str = Features(
{'''text''': Value('''string'''), '''title''': Value('''string'''), '''embeddings''': Sequence(Value('''float32'''))}) # optional, save as float32 instead of float64 to save space
lowercase :Optional[Any] = dataset.map(
partial(a_ , ctx_encoder=a_ , ctx_tokenizer=a_) , batched=a_ , batch_size=processing_args.batch_size , features=a_ , )
# And finally save your dataset
lowercase :str = os.path.join(rag_example_args.output_dir , '''my_knowledge_dataset''')
dataset.save_to_disk(a_)
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info('''Step 2 - Index the dataset''')
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
lowercase :str = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT)
dataset.add_faiss_index('''embeddings''' , custom_index=a_)
# And save the index
lowercase :Optional[Any] = os.path.join(rag_example_args.output_dir , '''my_knowledge_dataset_hnsw_index.faiss''')
dataset.get_index('''embeddings''').save(a_)
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__ ).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv" ) , metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"} , )
    question: Optional[str] = field(
        default=None , metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."} , )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq" , metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"} , )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base" , metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        } , )
    output_dir: Optional[str] = field(
        default=str(Path(__file__ ).parent / "test_run" / "dummy-kb" ) , metadata={"help": "Path to a directory where the dataset passages and the index will be saved"} , )
@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None , metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        } , )
    batch_size: int = field(
        default=16 , metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        } , )
@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768 , metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."} , )
    m: int = field(
        default=128 , metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        } , )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args , processing_args , index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
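# Hedged sketch of querying the index built above: reload the saved passages,
# re-attach the faiss index, embed a question with a DPR question encoder, and
# retrieve the nearest passages. The checkpoint names and k are illustrative,
# and the paths are the ones written by main().
from datasets import load_from_disk
from transformers import DPRQuestionEncoder, DPRQuestionEncoderTokenizerFast

dataset = load_from_disk(passages_path)
dataset.load_faiss_index("embeddings", index_path)
q_encoder = DPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
q_tokenizer = DPRQuestionEncoderTokenizerFast.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
question_emb = q_encoder(**q_tokenizer("What does Moses' rod turn into ?", return_tensors="pt"))[0][0].detach().numpy()
scores, passages = dataset.get_nearest_examples("embeddings", question_emb, k=5)
print(passages["title"])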
| 677 | 0 |
'''simple docstring'''
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class a ( __UpperCAmelCase ):
'''simple docstring'''
@require_torch
def __UpperCamelCase ( self ) -> Dict:
_a : Optional[Any] = '''
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
'''
_a : Any = '''
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
'''
_a : Tuple = '''
import socket
def offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")
socket.socket = offline_socket
'''
# Force fetching the files so that we can use the cache
_a : str = '''hf-internal-testing/tiny-random-bert'''
BertConfig.from_pretrained(snake_case__ )
BertModel.from_pretrained(snake_case__ )
BertTokenizer.from_pretrained(snake_case__ )
pipeline(task='fill-mask' , model=snake_case__ )
# baseline - just load from_pretrained with normal network
_a : Union[str, Any] = [sys.executable, '''-c''', '''\n'''.join([load, run, mock] )]
# should succeed
_a : Any = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_a : List[Any] = '''1'''
_a : List[str] = subprocess.run(snake_case__ , env=snake_case__ , check=snake_case__ , capture_output=snake_case__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
@require_torch
def __UpperCamelCase ( self ) -> Optional[int]:
_a : List[str] = '''
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
'''
_a : Dict = '''
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
'''
_a : List[Any] = '''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")
socket.socket = offline_socket
'''
# Force fetching the files so that we can use the cache
_a : str = '''hf-internal-testing/tiny-random-bert'''
BertConfig.from_pretrained(snake_case__ )
BertModel.from_pretrained(snake_case__ )
BertTokenizer.from_pretrained(snake_case__ )
pipeline(task='fill-mask' , model=snake_case__ )
# baseline - just load from_pretrained with normal network
_a : List[str] = [sys.executable, '''-c''', '''\n'''.join([load, run, mock] )]
# should succeed
_a : str = self.get_env()
_a : str = subprocess.run(snake_case__ , env=snake_case__ , check=snake_case__ , capture_output=snake_case__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
@require_torch
def __UpperCamelCase ( self ) -> Dict:
_a : str = '''
from transformers import BertConfig, BertModel, BertTokenizer
'''
_a : Union[str, Any] = '''
mname = "hf-internal-testing/tiny-random-bert-sharded"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print("success")
'''
_a : Optional[int] = '''
import socket
def offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")
socket.socket = offline_socket
'''
# baseline - just load from_pretrained with normal network
_a : Optional[Any] = [sys.executable, '''-c''', '''\n'''.join([load, run] )]
# should succeed
_a : Union[str, Any] = self.get_env()
_a : str = subprocess.run(snake_case__ , env=snake_case__ , check=snake_case__ , capture_output=snake_case__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
# next emulate no network
_a : Tuple = [sys.executable, '''-c''', '''\n'''.join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_a : Any = '''1'''
_a : Optional[Any] = subprocess.run(snake_case__ , env=snake_case__ , check=snake_case__ , capture_output=snake_case__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
@require_torch
def __UpperCamelCase ( self ) -> Any:
_a : Dict = '''
from transformers import pipeline
'''
_a : Optional[Any] = '''
mname = "hf-internal-testing/tiny-random-bert"
pipe = pipeline(model=mname)
'''
_a : Dict = '''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")
socket.socket = offline_socket
'''
_a : Tuple = self.get_env()
_a : Optional[Any] = '''1'''
_a : Optional[int] = [sys.executable, '''-c''', '''\n'''.join([load, mock, run] )]
_a : str = subprocess.run(snake_case__ , env=snake_case__ , check=snake_case__ , capture_output=snake_case__ )
self.assertEqual(result.returncode , 1 , result.stderr )
self.assertIn(
'You cannot infer task automatically within `pipeline` when using offline mode' , result.stderr.decode().replace('\n' , '' ) , )
@require_torch
def __UpperCamelCase ( self ) -> Dict:
_a : List[Any] = '''
from transformers import AutoModel
'''
_a : Union[str, Any] = '''
mname = "hf-internal-testing/test_dynamic_model"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print("success")
'''
# baseline - just load from_pretrained with normal network
_a : Union[str, Any] = [sys.executable, '''-c''', '''\n'''.join([load, run] )]
# should succeed
_a : List[str] = self.get_env()
_a : Optional[int] = subprocess.run(snake_case__ , env=snake_case__ , check=snake_case__ , capture_output=snake_case__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_a : List[Any] = '''1'''
_a : Tuple = subprocess.run(snake_case__ , env=snake_case__ , check=snake_case__ , capture_output=snake_case__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
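# Condensed version of the trick these tests rely on: monkey-patch
# socket.socket so any network access raises, then verify that cached artifacts
# still load when TRANSFORMERS_OFFLINE=1 is set in the subprocess environment.
import socket

def offline_socket(*args, **kwargs):
    raise RuntimeError("Offline mode is enabled, we shouldn't access internet")

# socket.socket = offline_socket  # apply inside a throwaway subprocess only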
| 120 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
UpperCAmelCase = {
'''configuration_longt5''': ['''LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LongT5Config''', '''LongT5OnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
'''LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LongT5EncoderModel''',
'''LongT5ForConditionalGeneration''',
'''LongT5Model''',
'''LongT5PreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
'''FlaxLongT5ForConditionalGeneration''',
'''FlaxLongT5Model''',
'''FlaxLongT5PreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
import sys
UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 677 | 0 |
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :Optional[Any] = f'''{sampling_rate}'''
__magic_name__ :int = '''1'''
__magic_name__ :List[str] = '''f32le'''
__magic_name__ :Any = [
'''ffmpeg''',
'''-i''',
'''pipe:0''',
'''-ac''',
ac,
'''-ar''',
ar,
'''-f''',
format_for_conversion,
'''-hide_banner''',
'''-loglevel''',
'''quiet''',
'''pipe:1''',
]
try:
with subprocess.Popen(a_, stdin=subprocess.PIPE, stdout=subprocess.PIPE ) as ffmpeg_process:
__magic_name__ :Union[str, Any] = ffmpeg_process.communicate(a_ )
except FileNotFoundError as error:
raise ValueError('''ffmpeg was not found but is required to load audio files from filename''' ) from error
__magic_name__ :Dict = output_stream[0]
__magic_name__ :Tuple = np.frombuffer(a_, np.floataa )
if audio.shape[0] == 0:
raise ValueError('''Malformed soundfile''' )
return audio
def __lowercase ( snake_case, snake_case, snake_case = "f32le", ):
"""simple docstring"""
__magic_name__ :Tuple = f'''{sampling_rate}'''
__magic_name__ :List[Any] = '''1'''
if format_for_conversion == "s16le":
__magic_name__ :List[str] = 2
elif format_for_conversion == "f32le":
__magic_name__ :Optional[int] = 4
else:
raise ValueError(f'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
__magic_name__ :Union[str, Any] = platform.system()
if system == "Linux":
__magic_name__ :str = '''alsa'''
__magic_name__ :List[str] = '''default'''
elif system == "Darwin":
__magic_name__ :Optional[int] = '''avfoundation'''
__magic_name__ :Any = ''':0'''
elif system == "Windows":
__magic_name__ :Tuple = '''dshow'''
__magic_name__ :Optional[int] = '''default'''
__magic_name__ :int = [
'''ffmpeg''',
'''-f''',
format_,
'''-i''',
input_,
'''-ac''',
ac,
'''-ar''',
ar,
'''-f''',
format_for_conversion,
'''-fflags''',
'''nobuffer''',
'''-hide_banner''',
'''-loglevel''',
'''quiet''',
'''pipe:1''',
]
__magic_name__ :Dict = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
__magic_name__ :int = _ffmpeg_stream(a_, a_ )
for item in iterator:
yield item
def __lowercase ( snake_case, snake_case, snake_case = None, snake_case = None, snake_case = "f32le", ):
"""simple docstring"""
if stream_chunk_s is not None:
__magic_name__ :Tuple = stream_chunk_s
else:
__magic_name__ :Any = chunk_length_s
__magic_name__ :str = ffmpeg_microphone(a_, a_, format_for_conversion=a_ )
if format_for_conversion == "s16le":
__magic_name__ :Optional[int] = np.intaa
__magic_name__ :Optional[int] = 2
elif format_for_conversion == "f32le":
__magic_name__ :Dict = np.floataa
__magic_name__ :List[Any] = 4
else:
raise ValueError(f'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
if stride_length_s is None:
__magic_name__ :List[Any] = chunk_length_s / 6
__magic_name__ :Any = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(a_, (int, float) ):
__magic_name__ :str = [stride_length_s, stride_length_s]
__magic_name__ :Any = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
__magic_name__ :str = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
__magic_name__ :str = datetime.datetime.now()
__magic_name__ :Union[str, Any] = datetime.timedelta(seconds=a_ )
for item in chunk_bytes_iter(a_, a_, stride=(stride_left, stride_right), stream=a_ ):
# Put everything back in numpy scale
__magic_name__ :List[str] = np.frombuffer(item['''raw'''], dtype=a_ )
__magic_name__ :Optional[Any] = (
item['''stride'''][0] // size_of_sample,
item['''stride'''][1] // size_of_sample,
)
__magic_name__ :Dict = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 1_0 * delta:
# We're late !! SKIP
continue
yield item
def __lowercase ( snake_case, snake_case, snake_case, snake_case = False ):
"""simple docstring"""
__magic_name__ :Dict = b''''''
__magic_name__ :str = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
f'''Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}''' )
__magic_name__ :Optional[int] = 0
for raw in iterator:
acc += raw
if stream and len(a_ ) < chunk_len:
__magic_name__ :Any = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(a_ ) >= chunk_len:
# We are flushing the accumulator
__magic_name__ :List[str] = (_stride_left, stride_right)
__magic_name__ :Any = {'''raw''': acc[:chunk_len], '''stride''': stride}
if stream:
__magic_name__ :Optional[Any] = False
yield item
__magic_name__ :Optional[Any] = stride_left
__magic_name__ :Union[str, Any] = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(a_ ) > stride_left:
__magic_name__ :Optional[Any] = {'''raw''': acc, '''stride''': (_stride_left, 0)}
if stream:
__magic_name__ :str = False
yield item
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :str = 2**2_4 # 16Mo
try:
with subprocess.Popen(a_, stdout=subprocess.PIPE, bufsize=a_ ) as ffmpeg_process:
while True:
__magic_name__ :List[str] = ffmpeg_process.stdout.read(a_ )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError('''ffmpeg was not found but is required to stream audio files from filename''' ) from error
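# Self-contained sketch of the chunk/stride logic implemented above, using a
# symmetric stride for brevity: overlapping windows are emitted so downstream
# ASR can discard the strided edges. Values are illustrative.
def chunk_with_stride(data: bytes, chunk_len: int, stride: int):
    first_left = 0  # the very first chunk has no left context to discard
    while len(data) >= chunk_len:
        yield data[:chunk_len], (first_left, stride)
        first_left = stride
        data = data[chunk_len - 2 * stride:]
    if len(data) > stride:
        yield data, (first_left, 0)  # final partial chunk

print(list(chunk_with_stride(b"abcdefgh", 4, 1)))
# [(b'abcd', (0, 1)), (b'cdef', (1, 1)), (b'efgh', (1, 1)), (b'gh', (1, 0))]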
| 0 |
"""simple docstring"""
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
UpperCAmelCase = logging.get_logger(__name__)
class __magic_name__ :
def __init__( self : Tuple , snake_case__ : str = None , snake_case__ : uuid.UUID = None , snake_case__ : Optional[int]=None , snake_case__ : Tuple=None ):
'''simple docstring'''
if not conversation_id:
lowercase :List[Any] = uuid.uuida()
if past_user_inputs is None:
lowercase :Union[str, Any] = []
if generated_responses is None:
lowercase :List[str] = []
lowercase :uuid.UUID = conversation_id
lowercase :List[str] = past_user_inputs
lowercase :List[str] = generated_responses
lowercase :Optional[str] = text
def __eq__( self : Optional[Any] , snake_case__ : str ):
'''simple docstring'''
if not isinstance(snake_case__ , snake_case__ ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def __snake_case ( self : Optional[int] , snake_case__ : str , snake_case__ : bool = False ):
'''simple docstring'''
if self.new_user_input:
if overwrite:
logger.warning(
f"""User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten """
f"""with: \"{text}\".""" )
lowercase :List[str] = text
else:
logger.warning(
f"""User input added while unprocessed input was existing: \"{self.new_user_input}\" new input """
f"""ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input""" )
else:
lowercase :Optional[int] = text
def __snake_case ( self : Any ):
'''simple docstring'''
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
lowercase :Tuple = None
def __snake_case ( self : Tuple , snake_case__ : str ):
'''simple docstring'''
self.generated_responses.append(snake_case__ )
def __snake_case ( self : Tuple ):
'''simple docstring'''
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self : Dict ):
'''simple docstring'''
lowercase :int = f"""Conversation id: {self.uuid} \n"""
for is_user, text in self.iter_texts():
lowercase :Dict = '''user''' if is_user else '''bot'''
output += f"""{name} >> {text} \n"""
return output
@add_end_docstrings(
__UpperCAmelCase , R"\n min_length_for_response (`int`, *optional*, defaults to 32):\n The minimum length (in number of tokens) for a response.\n minimum_tokens (`int`, *optional*, defaults to 10):\n The minimum length of tokens to leave for a response.\n " , )
class __magic_name__ ( __UpperCAmelCase ):
def __init__( self : Optional[Any] , *snake_case__ : Optional[Any] , **snake_case__ : List[Any] ):
'''simple docstring'''
super().__init__(*snake_case__ , **snake_case__ )
if self.tokenizer.pad_token_id is None:
lowercase :Any = self.tokenizer.eos_token
def __snake_case ( self : List[Any] , snake_case__ : Optional[int]=None , snake_case__ : Union[str, Any]=None , snake_case__ : List[str]=None , **snake_case__ : Union[str, Any] ):
'''simple docstring'''
lowercase :str = {}
lowercase :List[str] = {}
lowercase :Tuple = {}
if min_length_for_response is not None:
lowercase :Dict = min_length_for_response
if minimum_tokens is not None:
lowercase :Union[str, Any] = minimum_tokens
if "max_length" in generate_kwargs:
lowercase :List[Any] = generate_kwargs['''max_length''']
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
lowercase :Dict = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(snake_case__ )
return preprocess_params, forward_params, postprocess_params
def __call__( self : List[Any] , snake_case__ : Union[Conversation, List[Conversation]] , snake_case__ : int=0 , **snake_case__ : int ):
'''simple docstring'''
lowercase :int = super().__call__(snake_case__ , num_workers=snake_case__ , **snake_case__ )
if isinstance(snake_case__ , snake_case__ ) and len(snake_case__ ) == 1:
return outputs[0]
return outputs
def __snake_case ( self : List[Any] , snake_case__ : Conversation , snake_case__ : Any=3_2 ):
'''simple docstring'''
if not isinstance(snake_case__ , snake_case__ ):
            raise ValueError('''ConversationalPipeline expects a Conversation as input''' )
if conversation.new_user_input is None:
raise ValueError(
f"""Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. """
'''Add user inputs with the conversation\'s `add_user_input` method''' )
if hasattr(self.tokenizer , '''_build_conversation_input_ids''' ):
lowercase :List[str] = self.tokenizer._build_conversation_input_ids(snake_case__ )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
lowercase :List[str] = self._legacy_parse_and_tokenize(snake_case__ )
if self.framework == "pt":
lowercase :int = torch.LongTensor([input_ids] )
elif self.framework == "tf":
lowercase :Any = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def __snake_case ( self : int , snake_case__ : Union[str, Any] , snake_case__ : Any=1_0 , **snake_case__ : int ):
'''simple docstring'''
lowercase :Dict = generate_kwargs.get('''max_length''' , self.model.config.max_length )
lowercase :Optional[Any] = model_inputs['''input_ids'''].shape[1]
if max_length - minimum_tokens < n:
logger.warning(f"""Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})""" )
lowercase :int = max_length - minimum_tokens
lowercase :int = model_inputs['''input_ids'''][:, -trim:]
if "attention_mask" in model_inputs:
lowercase :int = model_inputs['''attention_mask'''][:, -trim:]
lowercase :int = model_inputs.pop('''conversation''' )
lowercase :Union[str, Any] = max_length
lowercase :Dict = self.model.generate(**snake_case__ , **snake_case__ )
if self.model.config.is_encoder_decoder:
lowercase :List[Any] = 1
else:
lowercase :Optional[Any] = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        '''simple docstring'''
        output_ids = model_outputs['''output_ids''']
        answer = self.tokenizer.decode(
            output_ids[0], skip_special_tokens=True, clean_up_tokenization_spaces=clean_up_tokenization_spaces)
        conversation = model_outputs['''conversation''']
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation
    def _legacy_parse_and_tokenize(self, conversation: Conversation):
        '''simple docstring'''
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))

        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
| 677 | 0 |
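The pipeline above only makes sense together with the Conversation container it consumes. As a hedged illustration (the "conversational" task and the example checkpoint "microsoft/DialoGPT-small" were available in older transformers releases; the task has since been removed), a round-trip might look like:

from transformers import Conversation, pipeline

# Hypothetical usage sketch; checkpoint name is only an example.
chatbot = pipeline("conversational", model="microsoft/DialoGPT-small")
conversation = Conversation("Is it a good day for a walk?")
conversation = chatbot(conversation)          # runs preprocess -> _forward -> postprocess
print(conversation.generated_responses[-1])   # the response appended by postprocess()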
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/xprophetnet-large-wiki100-cased": (
"https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"
),
}
class XLMProphetNetConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "xlm-prophetnet"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "num_encoder_attention_heads",
    }

    def __init__(self, activation_dropout=0.1, activation_function="gelu", vocab_size=30522, hidden_size=1024, encoder_ffn_dim=4096, num_encoder_layers=12, num_encoder_attention_heads=16, decoder_ffn_dim=4096, num_decoder_layers=12, num_decoder_attention_heads=16, attention_dropout=0.1, dropout=0.1, max_position_embeddings=512, init_std=0.02, is_encoder_decoder=True, add_cross_attention=True, decoder_start_token_id=0, ngram=2, num_buckets=32, relative_max_distance=128, disable_ngram_loss=False, eps=0.0, use_cache=True, pad_token_id=0, bos_token_id=1, eos_token_id=2, **kwargs):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_ffn_dim = encoder_ffn_dim
        self.num_encoder_layers = num_encoder_layers
        self.num_encoder_attention_heads = num_encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_decoder_layers = num_decoder_layers
        self.num_decoder_attention_heads = num_decoder_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.init_std = init_std  # Normal(0, this parameter)
        self.activation_function = activation_function

        # parameters for xlmprophetnet
        self.ngram = ngram
        self.num_buckets = num_buckets
        self.relative_max_distance = relative_max_distance
        self.disable_ngram_loss = disable_ngram_loss
        self.eps = eps

        # 3 Types of Dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.dropout = dropout

        self.use_cache = use_cache

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, add_cross_attention=add_cross_attention, decoder_start_token_id=decoder_start_token_id, **kwargs)

    @property
    def num_hidden_layers(self) -> int:
        return self.num_encoder_layers + self.num_decoder_layers

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"
            " `num_decoder_layers`.")
| 424 |
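A PretrainedConfig subclass like this one is mostly a typed bag of hyperparameters. As a minimal sketch of the standard round-trip (generic PretrainedConfig API, nothing specific to this model beyond the class name):

from transformers import XLMProphetNetConfig

config = XLMProphetNetConfig(num_encoder_layers=2, num_decoder_layers=2)
config.save_pretrained("./tiny-xprophetnet")              # writes config.json
reloaded = XLMProphetNetConfig.from_pretrained("./tiny-xprophetnet")
assert reloaded.num_encoder_layers == 2
print(reloaded.num_hidden_layers)                         # 4: encoder + decoder layers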
"""simple docstring"""
def solution(n: int = 100) -> int:
    collect_powers = set()
    current_pow = 0
    n = n + 1  # maximum limit
    for a in range(2, n):
        for b in range(2, n):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow)  # adds the result to the set
    return len(collect_powers)


if __name__ == "__main__":
    print('''Number of terms ''', solution(int(str(input()).strip())))
| 677 | 0 |
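Quick sanity check of the distinct-powers count: for 2 <= a, b <= 5 there are exactly 15 distinct values of a**b (the worked example from Project Euler 29, whose published answer for n = 100 is 9183):

assert solution(5) == 15       # 16 combinations, with 2**4 == 4**2 counted once
assert solution(100) == 9183   # well-known Project Euler 29 result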
'''simple docstring'''
from __future__ import annotations
from collections.abc import Generator
def sieve() -> Generator[int, None, None]:
    '''simple docstring'''
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            factor_map[prime * prime] = prime
            yield prime
        prime += 1


def solution(limit: float = 1e10) -> int:
    '''simple docstring'''
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the remainder will be 2.
        next(primes)
        n += 2


if __name__ == "__main__":
    print(solution())
| 400 |
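The `2 * prime * n` shortcut rests on the remainder identity behind Project Euler 123: for the n-th prime p and odd n, ((p - 1)**n + (p + 1)**n) mod p**2 equals 2*p*n. A small self-contained check of that identity (pure arithmetic, independent of the code above):

p, n = 11, 5                        # 11 is the 5th prime
lhs = ((p - 1) ** n + (p + 1) ** n) % (p * p)
assert lhs == 2 * p * n             # 110 == 110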
"""simple docstring"""
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/xprophetnet-large-wiki100-cased''': (
'''https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json'''
),
}
class XLMProphetNetConfig(PretrainedConfig):
    model_type = "xlm-prophetnet"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "num_encoder_attention_heads",
    }

    def __init__(self, activation_dropout: Optional[float] = 0.1, activation_function: Optional[Union[str, Callable]] = "gelu", vocab_size: Optional[int] = 30522, hidden_size: Optional[int] = 1024, encoder_ffn_dim: Optional[int] = 4096, num_encoder_layers: Optional[int] = 12, num_encoder_attention_heads: Optional[int] = 16, decoder_ffn_dim: Optional[int] = 4096, num_decoder_layers: Optional[int] = 12, num_decoder_attention_heads: Optional[int] = 16, attention_dropout: Optional[float] = 0.1, dropout: Optional[float] = 0.1, max_position_embeddings: Optional[int] = 512, init_std: Optional[float] = 0.02, is_encoder_decoder: Optional[bool] = True, add_cross_attention: Optional[bool] = True, decoder_start_token_id: Optional[int] = 0, ngram: Optional[int] = 2, num_buckets: Optional[int] = 32, relative_max_distance: Optional[int] = 128, disable_ngram_loss: Optional[bool] = False, eps: Optional[float] = 0.0, use_cache: Optional[bool] = True, pad_token_id: Optional[int] = 0, bos_token_id: Optional[int] = 1, eos_token_id: Optional[int] = 2, **kwargs):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_ffn_dim = encoder_ffn_dim
        self.num_encoder_layers = num_encoder_layers
        self.num_encoder_attention_heads = num_encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_decoder_layers = num_decoder_layers
        self.num_decoder_attention_heads = num_decoder_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.init_std = init_std  # Normal(0, this parameter)
        self.activation_function = activation_function

        # parameters for xlmprophetnet
        self.ngram = ngram
        self.num_buckets = num_buckets
        self.relative_max_distance = relative_max_distance
        self.disable_ngram_loss = disable_ngram_loss
        self.eps = eps

        # 3 Types of Dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.dropout = dropout

        self.use_cache = use_cache

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, add_cross_attention=add_cross_attention, decoder_start_token_id=decoder_start_token_id, **kwargs)

    @property
    def num_hidden_layers(self) -> int:
        '''simple docstring'''
        return self.num_encoder_layers + self.num_decoder_layers

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        '''simple docstring'''
        raise NotImplementedError(
            '''This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and'''
            ''' `num_decoder_layers`.''')
| 677 | 0 |
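One detail worth making concrete: `num_hidden_layers` is a derived, read-only property here, so assigning to it goes through the setter and always raises. A minimal sketch, assuming the restored class above:

config = XLMProphetNetConfig()
print(config.num_hidden_layers)    # 24 = 12 encoder + 12 decoder layers by default
try:
    config.num_hidden_layers = 6   # the setter unconditionally raises
except NotImplementedError as err:
    print(err)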
'''simple docstring'''
from __future__ import annotations
class Node:
    def __init__(self, data=None):
        self.data = data
        self.next = None

    def __repr__(self):
        string_rep = []
        temp = self
        while temp:
            string_rep.append(f'''{temp.data}''')
            temp = temp.next
        return "->".join(string_rep)


def make_linked_list(elements_list):
    if not elements_list:
        raise Exception("""The Elements List is empty""")

    current = head = Node(elements_list[0])
    for i in range(1, len(elements_list)):
        current.next = Node(elements_list[i])
        current = current.next
    return head


def print_reverse(head_node):
    if head_node is not None and isinstance(head_node, Node):
        print_reverse(head_node.next)
        print(head_node.data)


def main():
    from doctest import testmod

    testmod()
    linked_list = make_linked_list([14, 52, 14, 12, 43])
    print("""Linked List:""")
    print(linked_list)
    print("""Elements in Reverse:""")
    print_reverse(linked_list)


if __name__ == "__main__":
    main()
| 263 |
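print_reverse recurses all the way to the tail before printing, so nodes come out in reverse order. A quick iterative cross-check of that traversal (assuming the Node/make_linked_list helpers above; collect_reverse is a hypothetical helper added only for the check):

def collect_reverse(head_node):
    # Iterative equivalent of print_reverse, returning values instead of printing.
    values = []
    while head_node:
        values.append(head_node.data)
        head_node = head_node.next
    return values[::-1]

head = make_linked_list([1, 2, 3])
assert collect_reverse(head) == [3, 2, 1]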
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    '''configuration_bert''': ['''BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BertConfig''', '''BertOnnxConfig'''],
    '''tokenization_bert''': ['''BasicTokenizer''', '''BertTokenizer''', '''WordpieceTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_bert_fast'''] = ['''BertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_bert'''] = [
'''BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BertForMaskedLM''',
'''BertForMultipleChoice''',
'''BertForNextSentencePrediction''',
'''BertForPreTraining''',
'''BertForQuestionAnswering''',
'''BertForSequenceClassification''',
'''BertForTokenClassification''',
'''BertLayer''',
'''BertLMHeadModel''',
'''BertModel''',
'''BertPreTrainedModel''',
'''load_tf_weights_in_bert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_bert'''] = [
'''TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFBertEmbeddings''',
'''TFBertForMaskedLM''',
'''TFBertForMultipleChoice''',
'''TFBertForNextSentencePrediction''',
'''TFBertForPreTraining''',
'''TFBertForQuestionAnswering''',
'''TFBertForSequenceClassification''',
'''TFBertForTokenClassification''',
'''TFBertLMHeadModel''',
'''TFBertMainLayer''',
'''TFBertModel''',
'''TFBertPreTrainedModel''',
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_bert_tf'''] = ['''TFBertTokenizer''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_bert'''] = [
'''FlaxBertForCausalLM''',
'''FlaxBertForMaskedLM''',
'''FlaxBertForMultipleChoice''',
'''FlaxBertForNextSentencePrediction''',
'''FlaxBertForPreTraining''',
'''FlaxBertForQuestionAnswering''',
'''FlaxBertForSequenceClassification''',
'''FlaxBertForTokenClassification''',
'''FlaxBertModel''',
'''FlaxBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 677 | 0 |
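The `_import_structure` / `_LazyModule` pattern defers heavy imports until an attribute is first touched. A minimal, self-contained sketch of the same idea (not transformers' actual `_LazyModule`, just the module-level `__getattr__` mechanism from PEP 562 that it builds on; the module names here are arbitrary examples):

import importlib

_import_structure = {"json": ["dumps"], "math": ["sqrt"]}

def __getattr__(name):
    # Resolve the attribute lazily from the module that really defines it.
    for module_name, exported in _import_structure.items():
        if name in exported:
            module = importlib.import_module(module_name)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")

Placed in a package's __init__.py, `from pkg import sqrt` then triggers the real `import math` only on first access.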
"""simple docstring"""
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PREFIX = """https://openaipublic.azureedge.net/jukebox/models/"""
MODEL_MAPPING = {
"""jukebox-1b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""1b_lyrics/prior_level_2.pth.tar""",
],
"""jukebox-5b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""5b_lyrics/prior_level_2.pth.tar""",
],
}
def replace_key(key):
    """simple docstring"""
    if key.endswith('''.model.1.bias''') and len(key.split('''.''')) > 10:
        key = key.replace('''.model.1.bias''', '''.conv1d_1.bias''')
    elif key.endswith('''.model.1.weight''') and len(key.split('''.''')) > 10:
        key = key.replace('''.model.1.weight''', '''.conv1d_1.weight''')
    elif key.endswith('''.model.3.bias''') and len(key.split('''.''')) > 10:
        key = key.replace('''.model.3.bias''', '''.conv1d_2.bias''')
    elif key.endswith('''.model.3.weight''') and len(key.split('''.''')) > 10:
        key = key.replace('''.model.3.weight''', '''.conv1d_2.weight''')

    if "conditioner_blocks.0." in key:
        key = key.replace('''conditioner_blocks.0''', '''conditioner_blocks''')

    if "prime_prior" in key:
        key = key.replace('''prime_prior''', '''encoder''')

    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace('''.emb.''', '''.''')

    if key.endswith('''k'''):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace('''.k''', '''.codebook''')
    if "y_emb." in key:
        return key.replace('''y_emb.''', '''metadata_embedding.''')
    if "x_emb.emb." in key:
        key = key.replace('''0.x_emb.emb''', '''embed_tokens''')
    if "prime_state_ln" in key:
        return key.replace('''prime_state_ln''', '''encoder.final_layer_norm''')
    if ".ln" in key:
        return key.replace('''.ln''', '''.layer_norm''')
    if "_ln" in key:
        return key.replace('''_ln''', '''_layer_norm''')
    if "prime_state_proj" in key:
        return key.replace('''prime_state_proj''', '''encoder.proj_in''')
    if "prime_x_out" in key:
        return key.replace('''prime_x_out''', '''encoder.lm_head''')
    if "prior.x_out" in key:
        return key.replace('''x_out''', '''fc_proj_out''')
    if "x_emb" in key:
        return key.replace('''x_emb''', '''embed_tokens''')

    return key
def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    """simple docstring"""
    new_dict = {}
    import re

    re_encoder_block_conv_in = re.compile(r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''')
    re_encoder_block_resnet = re.compile(
        r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''')
    re_encoder_block_proj_out = re.compile(r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''')

    re_decoder_block_conv_out = re.compile(r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''')
    re_decoder_block_resnet = re.compile(
        r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''')
    re_decoder_block_proj_in = re.compile(r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''')

    re_prior_cond_conv_out = re.compile(r'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)''')
    re_prior_cond_resnet = re.compile(
        r'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''')
    re_prior_cond_proj_in = re.compile(r'''conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)''')
    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"""
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)

        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."""
            resnet_block = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)

        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"""
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)

        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"""
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)

        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."""
            resnet_block = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)

        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"""
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)

        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f"""conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"""
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)

        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"""conditioner_blocks.upsampler.upsample_block.{block_index}."""
            resnet_block = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)

        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"""conditioner_blocks.upsampler.proj_in.{groups[-1]}"""
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)

        # keep original key
        else:
            key = original_key

        key = replace_key(key)

        if f"""{key_prefix}.{key}""" not in model_state_dict or key is None:
            print(f"""failed converting {original_key} to {key}, does not match""")
        # handle mismatched shape
        elif value.shape != model_state_dict[f"""{key_prefix}.{key}"""].shape:
            val = model_state_dict[f"""{key_prefix}.{key}"""]
            print(f"""{original_key}-> {key} : \nshape {val.shape} and {value.shape}, do not match""")
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value

    return new_dict
return new_dict
@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    """simple docstring"""
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f"""{pytorch_dump_folder_path}/{file.split('/')[-1]}"""):
            r = requests.get(f"""{PREFIX}{file}""", allow_redirects=True)
            os.makedirs(f"""{pytorch_dump_folder_path}/""", exist_ok=True)
            open(f"""{pytorch_dump_folder_path}/{file.split('/')[-1]}""", '''wb''').write(r.content)

    model_to_convert = MODEL_MAPPING[model_name.split('''/''')[-1]]

    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)

    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(f"""{pytorch_dump_folder_path}/{dict_name.split('/')[-1]}""")['''model''']

        new_dic = {}
        for k in old_dic.keys():
            if k.endswith('''.b'''):
                new_dic[k.replace('''b''', '''bias''')] = old_dic[k]
            elif k.endswith('''.w'''):
                new_dic[k.replace('''w''', '''weight''')] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace('''.blocks.''', '''.model.''')] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]

        key_prefix = '''vqvae''' if i == 0 else f"""priors.{3 - i}"""
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)

    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(f"""{pytorch_dump_folder_path}/mapping.json""", '''w''') as txtfile:
        json.dump(mapping, txtfile)

    print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""")
    model.save_pretrained(pytorch_dump_folder_path)

    return weight_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""jukebox-5b-lyrics""",
type=str,
help="""Name of the model you\'d like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""jukebox-5b-lyrics-converted""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
    args = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 19 |
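To see what one of the conversion regexes above actually does, here is a standalone check of the encoder conv-in rule (pattern copied from the script; the example key is made up for illustration):

import re

pattern = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
key = "encoders.0.level_blocks.1.model.2.3.weight"
groups = pattern.match(key).groups()                # ('0', '1', '2', '3', 'weight')
block_index = int(groups[2]) * 2 + int(groups[3])   # 2 * 2 + 3 = 7
new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
assert new_key == "encoders.0.level_blocks.1.downsample_block.7.weight"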
"""simple docstring"""
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class InstructBlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        '''simple docstring'''
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        '''simple docstring'''
        if images is None and text is None:
            raise ValueError('''You have to specify either images or text.''')

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        '''simple docstring'''
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        '''simple docstring'''
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 677 | 0 |
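A hedged usage sketch for a processor of this shape, built directly from its two components so nothing has to be downloaded beyond a stock tokenizer (the class name and tokenizer checkpoint are assumptions for illustration):

from PIL import Image
from transformers import AutoTokenizer, BlipImageProcessor

image_processor = BlipImageProcessor()
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
processor = InstructBlipProcessor(image_processor, tokenizer)   # the class defined above
inputs = processor(images=Image.new("RGB", (224, 224)), text="a test prompt", return_tensors="pt")
print(inputs.keys())   # pixel_values plus the tokenizer fields merged in by update()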
"""simple docstring"""
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNet2DConditionModel


def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '''-m''', '''--pretrained_model_name_or_path''', type=str, default=None, required=True, help='''Path to pretrained model or model identifier from huggingface.co/models.''')
    parser.add_argument(
        '''-c''', '''--caption''', type=str, default='''robotic cat with wings''', help='''Text used to generate images.''')
    parser.add_argument(
        '''-n''', '''--images_num''', type=int, default=4, help='''How many images to generate.''')
    parser.add_argument(
        '''-s''', '''--seed''', type=int, default=42, help='''Seed for random process.''')
    parser.add_argument(
        '''-ci''', '''--cuda_id''', type=int, default=0, help='''cuda_id.''')
    args = parser.parse_args()
    return args


def image_grid(imgs, rows, cols):
    if not len(imgs) == rows * cols:
        raise ValueError('''The specified number of rows and columns are not correct.''')
    w, h = imgs[0].size
    grid = Image.new('''RGB''', size=(cols * w, rows * h))
    grid_w, grid_h = grid.size
    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid


def generate_images(pipeline, prompt="robotic cat with wings", guidance_scale=7.5, num_inference_steps=50, num_images_per_prompt=1, seed=42):
    generator = torch.Generator(pipeline.device).manual_seed(seed)
    images = pipeline(
        prompt, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps, generator=generator, num_images_per_prompt=num_images_per_prompt).images
    _rows = int(math.sqrt(num_images_per_prompt))
    grid = image_grid(images, rows=_rows, cols=num_images_per_prompt // _rows)
    return grid, images


args = parse_args()
# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")
pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
pipeline.safety_checker = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, "best_model.pt")):
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, "unet", unet)
else:
    unet = unet.to(torch.device("cuda", args.cuda_id))
pipeline = pipeline.to(unet.device)
grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, "{}.png".format("_".join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, "_".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, "{}.png".format(idx + 1)))
| 586 |
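The grid helper is easy to sanity-check without any diffusion model, using solid-color tiles (PIL only; assumes the image_grid function restored above is in scope):

from PIL import Image

tiles = [Image.new("RGB", (64, 64), color) for color in ("red", "green", "blue", "white")]
grid = image_grid(tiles, rows=2, cols=2)
assert grid.size == (128, 128)   # 2 columns * 64 px wide, 2 rows * 64 px tall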
"""simple docstring"""
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class OfflineTests(TestCasePlus):
@require_torch
def __snake_case ( self : Dict ):
'''simple docstring'''
lowercase :Optional[Any] = '''
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
'''
lowercase :Any = '''
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
'''
lowercase :Tuple = '''
import socket
def offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")
socket.socket = offline_socket
'''
# Force fetching the files so that we can use the cache
lowercase :str = '''hf-internal-testing/tiny-random-bert'''
BertConfig.from_pretrained(snake_case__ )
BertModel.from_pretrained(snake_case__ )
BertTokenizer.from_pretrained(snake_case__ )
pipeline(task='''fill-mask''' , model=snake_case__ )
# baseline - just load from_pretrained with normal network
lowercase :Union[str, Any] = [sys.executable, '''-c''', '''\n'''.join([load, run, mock] )]
# should succeed
lowercase :Any = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
lowercase :List[Any] = '''1'''
lowercase :List[str] = subprocess.run(snake_case__ , env=snake_case__ , check=snake_case__ , capture_output=snake_case__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
def __snake_case ( self : Tuple ):
'''simple docstring'''
lowercase :List[str] = '''
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
'''
lowercase :Dict = '''
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
'''
lowercase :List[Any] = '''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")
socket.socket = offline_socket
'''
# Force fetching the files so that we can use the cache
lowercase :str = '''hf-internal-testing/tiny-random-bert'''
BertConfig.from_pretrained(snake_case__ )
BertModel.from_pretrained(snake_case__ )
BertTokenizer.from_pretrained(snake_case__ )
pipeline(task='''fill-mask''' , model=snake_case__ )
# baseline - just load from_pretrained with normal network
lowercase :List[str] = [sys.executable, '''-c''', '''\n'''.join([load, run, mock] )]
# should succeed
lowercase :str = self.get_env()
lowercase :str = subprocess.run(snake_case__ , env=snake_case__ , check=snake_case__ , capture_output=snake_case__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
def __snake_case ( self : int ):
'''simple docstring'''
lowercase :str = '''
from transformers import BertConfig, BertModel, BertTokenizer
'''
lowercase :Union[str, Any] = '''
mname = "hf-internal-testing/tiny-random-bert-sharded"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print("success")
'''
lowercase :Optional[int] = '''
import socket
def offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")
socket.socket = offline_socket
'''
# baseline - just load from_pretrained with normal network
lowercase :Optional[Any] = [sys.executable, '''-c''', '''\n'''.join([load, run] )]
# should succeed
lowercase :Union[str, Any] = self.get_env()
lowercase :str = subprocess.run(snake_case__ , env=snake_case__ , check=snake_case__ , capture_output=snake_case__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
# next emulate no network
lowercase :Tuple = [sys.executable, '''-c''', '''\n'''.join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
lowercase :Any = '''1'''
lowercase :Optional[Any] = subprocess.run(snake_case__ , env=snake_case__ , check=snake_case__ , capture_output=snake_case__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
def __snake_case ( self : Optional[int] ):
'''simple docstring'''
lowercase :Dict = '''
from transformers import pipeline
'''
lowercase :Optional[Any] = '''
mname = "hf-internal-testing/tiny-random-bert"
pipe = pipeline(model=mname)
'''
lowercase :Dict = '''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")
socket.socket = offline_socket
'''
lowercase :Tuple = self.get_env()
lowercase :Optional[Any] = '''1'''
lowercase :Optional[int] = [sys.executable, '''-c''', '''\n'''.join([load, mock, run] )]
lowercase :str = subprocess.run(snake_case__ , env=snake_case__ , check=snake_case__ , capture_output=snake_case__ )
self.assertEqual(result.returncode , 1 , result.stderr )
self.assertIn(
'''You cannot infer task automatically within `pipeline` when using offline mode''' , result.stderr.decode().replace('''\n''' , '''''' ) , )
@require_torch
def __snake_case ( self : Optional[Any] ):
'''simple docstring'''
lowercase :List[Any] = '''
from transformers import AutoModel
'''
lowercase :Union[str, Any] = '''
mname = "hf-internal-testing/test_dynamic_model"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print("success")
'''
# baseline - just load from_pretrained with normal network
lowercase :Union[str, Any] = [sys.executable, '''-c''', '''\n'''.join([load, run] )]
# should succeed
lowercase :List[str] = self.get_env()
lowercase :Optional[int] = subprocess.run(snake_case__ , env=snake_case__ , check=snake_case__ , capture_output=snake_case__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
lowercase :List[Any] = '''1'''
lowercase :Tuple = subprocess.run(snake_case__ , env=snake_case__ , check=snake_case__ , capture_output=snake_case__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
| 677 | 0 |
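The trick these tests rely on is crude but effective: replace socket.socket before anything networked runs, so every connection attempt fails fast. A stripped-down version of the same technique in isolation:

import socket

def offline_socket(*args, **kwargs):
    raise RuntimeError("network access disabled for this test")

socket.socket = offline_socket   # any subsequent connect attempt now raises

try:
    import urllib.request
    urllib.request.urlopen("https://example.com")
except Exception as err:
    print(f"blocked as expected: {err}")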
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec("""s3fs""") is not None

if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401

COMPRESSION_FILESYSTEMS = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(F'A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.')
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri(dataset_path: str) -> str:
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path


def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str):
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)


def _reset_fsspec_lock() -> None:
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
| 204 |
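A quick check of the two small helpers above using fsspec's in-memory filesystem (no datasets-specific machinery involved):

import fsspec

fs = fsspec.filesystem("memory")
assert is_remote_filesystem(fs)                           # protocol is "memory", not "file"
assert extract_path_from_uri("s3://bucket/data") == "bucket/data"
assert extract_path_from_uri("plain/local/path") == "plain/local/path"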
"""simple docstring"""
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        '''simple docstring'''
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        '''simple docstring'''
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [h.remove() for h in self.handles]
        return self

    @property
    def parametrized(self):
        '''simple docstring'''
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)

    def __call__(self, x: Tensor):
        '''simple docstring'''
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced):
            raise Exception(
                f"""Numbers of operations are different. Source module has {len(src_traced)} operations while"""
                f""" destination module has {len(dest_traced)}.""")

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"""Transferred from={src_m} to={dest_m}""")
def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"""Converting {name}...""")
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."

    checkpoint_name = f"""resnet{'-'.join(name.split('resnet'))}"""
    print(checkpoint_name)

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name, commit_message='''Add model''', use_temp_dir=True)

        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''')
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name, commit_message='''Add image processor''', use_temp_dir=True)

        print(f"""Pushed {checkpoint_name}""")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = '''imagenet-1k-id2label.json'''
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = '''huggingface/label-files'''
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='''dataset'''), '''r'''))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
        '''resnet18''': ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512], layer_type='''basic'''),
        '''resnet26''': ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[256, 512, 1024, 2048], layer_type='''bottleneck'''),
        '''resnet34''': ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[64, 128, 256, 512], layer_type='''basic'''),
        '''resnet50''': ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type='''bottleneck'''),
        '''resnet101''': ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type='''bottleneck'''),
        '''resnet152''': ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type='''bottleneck'''),
    }

    if model_name:
        convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help=(
'''The name of the model you wish to convert, it must be one of the supported resnet* architecture,'''
''' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=Path,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
default=True,
type=bool,
required=False,
help='''If True, push model and image processor to the hub.''',
)
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 677 | 0 |
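The Tracker above is just a forward-hook recorder over leaf modules; here is the core mechanism in isolation (plain PyTorch, no timm or transformers needed):

import torch
import torch.nn as nn

model = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 2))
seen = []
handles = [m.register_forward_hook(lambda m, i, o: seen.append(type(m).__name__))
           for m in model.modules() if len(list(m.children())) == 0]
model(torch.randn(1, 4))
[h.remove() for h in handles]
print(seen)   # ['Linear', 'ReLU', 'Linear'] - the leaf modules, in call order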
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"google/owlvit-base-patch32": "https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json",
"google/owlvit-base-patch16": "https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json",
"google/owlvit-large-patch14": "https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json",
}
class OwlViTTextConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "owlvit_text_model"
def __init__( self :str , lowerCamelCase__ :Optional[Any]=4_94_08 , lowerCamelCase__ :str=5_12 , lowerCamelCase__ :Dict=20_48 , lowerCamelCase__ :str=12 , lowerCamelCase__ :Optional[int]=8 , lowerCamelCase__ :List[Any]=16 , lowerCamelCase__ :Optional[int]="quick_gelu" , lowerCamelCase__ :Optional[int]=1e-5 , lowerCamelCase__ :List[Any]=0.0 , lowerCamelCase__ :Any=0.02 , lowerCamelCase__ :Union[str, Any]=1.0 , lowerCamelCase__ :Optional[int]=0 , lowerCamelCase__ :Optional[Any]=4_94_06 , lowerCamelCase__ :str=4_94_07 , **lowerCamelCase__ :int , ):
super().__init__(pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , **snake_case__ )
UpperCamelCase__ :Optional[Any] = vocab_size
UpperCamelCase__ :Dict = hidden_size
UpperCamelCase__ :Optional[int] = intermediate_size
UpperCamelCase__ :List[str] = num_hidden_layers
UpperCamelCase__ :str = num_attention_heads
UpperCamelCase__ :str = max_position_embeddings
UpperCamelCase__ :Any = hidden_act
UpperCamelCase__ :Union[str, Any] = layer_norm_eps
UpperCamelCase__ :Tuple = attention_dropout
UpperCamelCase__ :Optional[Any] = initializer_range
UpperCamelCase__ :int = initializer_factor
@classmethod
def __a ( cls :Tuple , lowerCamelCase__ :Union[str, os.PathLike] , **lowerCamelCase__ :str ):
cls._set_token_in_kwargs(snake_case__ )
UpperCamelCase__ :List[Any] = cls.get_config_dict(snake_case__ , **snake_case__ )
# get the text config dict if we are loading from OwlViTConfig
if config_dict.get("""model_type""" ) == "owlvit":
UpperCamelCase__ :str = config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(snake_case__ , **snake_case__ )
class OwlViTVisionConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "owlvit_vision_model"
def __init__( self :Optional[int] , lowerCamelCase__ :str=7_68 , lowerCamelCase__ :Optional[int]=30_72 , lowerCamelCase__ :Union[str, Any]=12 , lowerCamelCase__ :int=12 , lowerCamelCase__ :Optional[int]=3 , lowerCamelCase__ :Optional[int]=7_68 , lowerCamelCase__ :Any=32 , lowerCamelCase__ :Optional[Any]="quick_gelu" , lowerCamelCase__ :Optional[int]=1e-5 , lowerCamelCase__ :Optional[Any]=0.0 , lowerCamelCase__ :Dict=0.02 , lowerCamelCase__ :str=1.0 , **lowerCamelCase__ :Any , ):
super().__init__(**snake_case__ )
UpperCamelCase__ :Tuple = hidden_size
UpperCamelCase__ :str = intermediate_size
UpperCamelCase__ :Dict = num_hidden_layers
UpperCamelCase__ :int = num_attention_heads
UpperCamelCase__ :Any = num_channels
UpperCamelCase__ :Tuple = image_size
UpperCamelCase__ :Union[str, Any] = patch_size
UpperCamelCase__ :Dict = hidden_act
UpperCamelCase__ :Tuple = layer_norm_eps
UpperCamelCase__ :List[Any] = attention_dropout
UpperCamelCase__ :List[Any] = initializer_range
UpperCamelCase__ :str = initializer_factor
@classmethod
def __a ( cls :Optional[int] , lowerCamelCase__ :Union[str, os.PathLike] , **lowerCamelCase__ :Optional[int] ):
cls._set_token_in_kwargs(snake_case__ )
UpperCamelCase__ :str = cls.get_config_dict(snake_case__ , **snake_case__ )
# get the vision config dict if we are loading from OwlViTConfig
if config_dict.get("""model_type""" ) == "owlvit":
UpperCamelCase__ :List[Any] = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(snake_case__ , **snake_case__ )
class OwlViTConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "owlvit"
    is_composition = True
def __init__( self :Any , lowerCamelCase__ :List[str]=None , lowerCamelCase__ :Dict=None , lowerCamelCase__ :str=5_12 , lowerCamelCase__ :Tuple=2.6592 , lowerCamelCase__ :Any=True , **lowerCamelCase__ :Union[str, Any] , ):
super().__init__(**snake_case__ )
if text_config is None:
UpperCamelCase__ :int = {}
logger.info("""text_config is None. Initializing the OwlViTTextConfig with default values.""" )
if vision_config is None:
UpperCamelCase__ :Union[str, Any] = {}
logger.info("""vision_config is None. initializing the OwlViTVisionConfig with default values.""" )
UpperCamelCase__ :Dict = OwlViTTextConfig(**snake_case__ )
UpperCamelCase__ :Any = OwlViTVisionConfig(**snake_case__ )
UpperCamelCase__ :List[Any] = projection_dim
UpperCamelCase__ :Union[str, Any] = logit_scale_init_value
UpperCamelCase__ :Optional[int] = return_dict
UpperCamelCase__ :List[Any] = 1.0
@classmethod
def __a ( cls :Tuple , lowerCamelCase__ :Union[str, os.PathLike] , **lowerCamelCase__ :List[Any] ):
cls._set_token_in_kwargs(snake_case__ )
UpperCamelCase__ :str = cls.get_config_dict(snake_case__ , **snake_case__ )
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(snake_case__ , **snake_case__ )
@classmethod
def __a ( cls :Tuple , lowerCamelCase__ :Dict , lowerCamelCase__ :Dict , **lowerCamelCase__ :Union[str, Any] ):
UpperCamelCase__ :Tuple = {}
UpperCamelCase__ :Union[str, Any] = text_config
UpperCamelCase__ :Dict = vision_config
return cls.from_dict(snake_case__ , **snake_case__ )
def __a ( self :List[Any] ):
UpperCamelCase__ :List[Any] = copy.deepcopy(self.__dict__ )
UpperCamelCase__ :List[Any] = self.text_config.to_dict()
UpperCamelCase__ :Optional[int] = self.vision_config.to_dict()
UpperCamelCase__ :List[str] = self.__class__.model_type
return output
class OwlViTOnnxConfig(OnnxConfig):
    """simple docstring"""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("""input_ids""", {0: """batch""", 1: """sequence"""}),
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
                ("""attention_mask""", {0: """batch""", 1: """sequence"""}),
            ])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("""logits_per_image""", {0: """batch"""}),
                ("""logits_per_text""", {0: """batch"""}),
                ("""text_embeds""", {0: """batch"""}),
                ("""image_embeds""", {0: """batch"""}),
            ])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(self, processor: "ProcessorMixin", batch_size: int = -1, seq_length: int = -1, framework: Optional["TensorType"] = None) -> Mapping[str, Any]:
        text_input_dict = super().generate_dummy_inputs(
            processor.tokenizer, batch_size=batch_size, seq_length=seq_length, framework=framework)
        image_input_dict = super().generate_dummy_inputs(
            processor.image_processor, batch_size=batch_size, framework=framework)
        return {**text_input_dict, **image_input_dict}

    @property
    def default_onnx_opset(self) -> int:
        return 14
| 45 |
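The composite config above is assembled from a text and a vision sub-config. A hedged sketch of building and inspecting one (standard transformers API for OwlViT; from_text_vision_configs takes plain dicts, matching the classmethod shown above):

from transformers import OwlViTConfig, OwlViTTextConfig, OwlViTVisionConfig

text = OwlViTTextConfig(num_hidden_layers=2)
vision = OwlViTVisionConfig(num_hidden_layers=2)
config = OwlViTConfig.from_text_vision_configs(text.to_dict(), vision.to_dict())
print(config.text_config.num_hidden_layers, config.vision_config.num_hidden_layers)  # 2 2
print(config.projection_dim)   # 512 by default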
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class TFMobileBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": TFMobileBertModel,
"fill-mask": TFMobileBertForMaskedLM,
"question-answering": TFMobileBertForQuestionAnswering,
"text-classification": TFMobileBertForSequenceClassification,
"token-classification": TFMobileBertForTokenClassification,
"zero-shot": TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        '''simple docstring'''
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
        return inputs_dict
class TFMobileBertModelTester(object):
def __init__( self : Any , snake_case__ : Dict , snake_case__ : Dict=1_3 , snake_case__ : Tuple=7 , snake_case__ : Optional[Any]=True , snake_case__ : Union[str, Any]=True , snake_case__ : str=True , snake_case__ : Optional[Any]=True , snake_case__ : Any=9_9 , snake_case__ : Optional[Any]=3_2 , snake_case__ : Optional[Any]=3_2 , snake_case__ : Any=2 , snake_case__ : Optional[int]=4 , snake_case__ : List[Any]=3_7 , snake_case__ : Optional[int]="gelu" , snake_case__ : List[Any]=0.1 , snake_case__ : str=0.1 , snake_case__ : List[Any]=5_1_2 , snake_case__ : List[str]=1_6 , snake_case__ : Union[str, Any]=2 , snake_case__ : Optional[Any]=0.02 , snake_case__ : Optional[Any]=3 , snake_case__ : Dict=4 , snake_case__ : int=None , ):
'''simple docstring'''
lowercase :Tuple = parent
lowercase :Tuple = batch_size
lowercase :Optional[Any] = seq_length
lowercase :Optional[Any] = is_training
lowercase :Optional[Any] = use_input_mask
lowercase :List[Any] = use_token_type_ids
lowercase :str = use_labels
lowercase :List[str] = vocab_size
lowercase :str = hidden_size
lowercase :Optional[int] = num_hidden_layers
lowercase :Dict = num_attention_heads
lowercase :Any = intermediate_size
lowercase :List[str] = hidden_act
lowercase :Optional[Any] = hidden_dropout_prob
lowercase :List[Any] = attention_probs_dropout_prob
lowercase :List[Any] = max_position_embeddings
lowercase :List[Any] = type_vocab_size
lowercase :Union[str, Any] = type_sequence_label_size
lowercase :Union[str, Any] = initializer_range
lowercase :Any = num_labels
lowercase :int = num_choices
lowercase :Dict = scope
lowercase :Dict = embedding_size
        def prepare_config_and_inputs(self):
            input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

            input_mask = None
            if self.use_input_mask:
                input_mask = random_attention_mask([self.batch_size, self.seq_length])

            token_type_ids = None
            if self.use_token_type_ids:
                token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

            sequence_labels = None
            token_labels = None
            choice_labels = None
            if self.use_labels:
                sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
                token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
                choice_labels = ids_tensor([self.batch_size], self.num_choices)

            config = MobileBertConfig(
                vocab_size=self.vocab_size,
                hidden_size=self.hidden_size,
                num_hidden_layers=self.num_hidden_layers,
                num_attention_heads=self.num_attention_heads,
                intermediate_size=self.intermediate_size,
                hidden_act=self.hidden_act,
                hidden_dropout_prob=self.hidden_dropout_prob,
                attention_probs_dropout_prob=self.attention_probs_dropout_prob,
                max_position_embeddings=self.max_position_embeddings,
                type_vocab_size=self.type_vocab_size,
                initializer_range=self.initializer_range,
                embedding_size=self.embedding_size,
            )

            return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
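
        # The base model is exercised with all three supported calling
        # conventions: a dict of named tensors, a positional list, and a bare
        # input_ids tensor.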
        def create_and_check_mobilebert_model(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertModel(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)

            inputs = [input_ids, input_mask]
            result = model(inputs)

            result = model(input_ids)

            self.parent.assertEqual(
                result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)
            )
            self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

        def create_and_check_mobilebert_for_masked_lm(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForMaskedLM(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

        def create_and_check_mobilebert_for_next_sequence_prediction(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForNextSentencePrediction(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_pretraining(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForPreTraining(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(
                result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size)
            )
            self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_sequence_classification(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_labels = self.num_labels
            model = TFMobileBertForSequenceClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
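
        # For multiple choice, each input is tiled along a new axis so the
        # model sees num_choices copies of the sequence per batch entry.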
        def create_and_check_mobilebert_for_multiple_choice(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_choices = self.num_choices
            model = TFMobileBertForMultipleChoice(config=config)
            multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
            multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
            multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
            inputs = {
                "input_ids": multiple_choice_inputs_ids,
                "attention_mask": multiple_choice_input_mask,
                "token_type_ids": multiple_choice_token_type_ids,
            }
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

        def create_and_check_mobilebert_for_token_classification(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_labels = self.num_labels
            model = TFMobileBertForTokenClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

        def create_and_check_mobilebert_for_question_answering(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForQuestionAnswering(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
            self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
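
        # Repackages prepare_config_and_inputs() into the (config, inputs_dict)
        # pair expected by the common mixin tests.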
        def prepare_config_and_inputs_for_common(self):
            config_and_inputs = self.prepare_config_and_inputs()
            (
                config,
                input_ids,
                token_type_ids,
                input_mask,
                sequence_labels,
                token_labels,
                choice_labels,
            ) = config_and_inputs
            inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
            return config, inputs_dict
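
    # Standard unittest wiring: one model tester for the shape checks above,
    # one ConfigTester for the shared configuration tests.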
    def setUp(self):
        self.model_tester = TFMobileBertModelTest.TFMobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)
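
    # Requires network access to download the checkpoint, hence @slow.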
    @slow
    def test_model_from_pretrained(self):
        for model_name in ["google/mobilebert-uncased"]:
            model = TFMobileBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
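
# End-to-end check against the released checkpoint: the first 3x3 corner of
# the pretraining logits is compared with hard-coded reference values.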
@require_tf
class TFMobileBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 30522]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
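
# A minimal sketch of running this suite locally (assumes a transformers
# source checkout with pytest installed; adjust the path to wherever this
# test file lives):
#
#   RUN_SLOW=1 python -m pytest tests/models/mobilebert/test_modeling_tf_mobilebert.py -v
#
# Without RUN_SLOW=1, the @slow-marked tests above are skipped.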