from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
    import numpy
    import tensorflow as tf

    from transformers.models.esm.modeling_tf_esm import (
        TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
        TFEsmForMaskedLM,
        TFEsmForSequenceClassification,
        TFEsmForTokenClassification,
        TFEsmModel,
    )
class TFEsmModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            pad_token_id=1,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = TFEsmModel(config=config)

        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "encoder_hidden_states": encoder_hidden_states,
            "encoder_attention_mask": encoder_attention_mask,
        }
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs, encoder_hidden_states=encoder_hidden_states)

        # Also check the case where encoder outputs are not passed
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmForMaskedLM(config=config)
        result = model([input_ids, input_mask])
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFEsmModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEsmModel,
            "fill-mask": TFEsmForMaskedLM,
            "text-classification": TFEsmForSequenceClassification,
            "token-classification": TFEsmForTokenClassification,
            "zero-shot": TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFEsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@unittest.skip("""Protein models do not support embedding resizing.""" )
def a ( self ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip("""Protein models do not support embedding resizing.""" )
def a ( self ) -> Dict:
"""simple docstring"""
pass
def a ( self ) -> List[Any]:
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = model_class(_UpperCamelCase )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class is TFEsmForMaskedLM:
# Output embedding test differs from the main test because they're a matrix, not a layer
__snake_case = model.get_bias()
assert isinstance(_UpperCamelCase , _UpperCamelCase )
for k, v in name.items():
assert isinstance(_UpperCamelCase , tf.Variable )
else:
__snake_case = model.get_output_embeddings()
assert x is None
__snake_case = model.get_bias()
assert name is None
@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape), expected_shape)

        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [8.921518, -10.589814, -6.4671307],
                    [-6.3967156, -13.911377, -1.1211915],
                    [-7.781247, -13.951557, -3.740592],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-2))

    @slow
    def test_inference_no_head(self):
        model = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        output = model(input_ids)[0]

        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [0.14443092, 0.54125327, 0.3247739],
                    [0.30340484, 0.00526676, 0.31077722],
                    [0.32278043, -0.24987096, 0.3414628],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
def binary_and(a: int, b: int) -> str:
    """Return the bitwise AND of two non-negative integers as a binary string."""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"

    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
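
    # Quick sanity check (hand-computed values, illustrative only):
    # 25 = 0b11001 and 32 = 0b100000, so the AND is zero, padded to six bits.
    print(binary_and(25, 32))  # 0b000000
    # 37 = 0b100101 and 50 = 0b110010 share only the 32s bit.
    print(binary_and(37, 50))  # 0b100000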
import heapq
def greedy_min_vertex_cover(graph: dict) -> set[int]:
    """Greedy APX-algorithm for finding a (not necessarily minimum) vertex cover."""
    queue: list[list] = []

    # For each node and its adjacency list, add them and the rank of the node to the queue.
    # Using the heapq module, the queue is filled like a priority queue.
    # heapq works with a min priority queue, so -1 * len(v) is used to build it.
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()

    # While the queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # Extract the vertex with max rank from the queue and add it to chosen_vertices.
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # Remove all arcs adjacent to argmax.
        for elem in queue:
            # If v has no adjacent node, skip it.
            if elem[0] == 0:
                continue
            # If argmax is reachable from elem,
            # remove argmax from elem's adjacency list and update its rank.
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # Re-order the queue.
        heapq.heapify(queue)
    return chosen_vertices


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    print(f"Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}")
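    # Hand-traced for the example graph above: the heuristic repeatedly takes a
    # highest-degree vertex, ending with the cover {0, 1, 2, 4}.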
import argparse
import torch
from torch import nn
from transformers import M2M100Config, M2M100ForConditionalGeneration


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_m2m100_checkpoint_from_disk(checkpoint_path):
    m2m_100 = torch.load(checkpoint_path, map_location="cpu")
    args = m2m_100["args"] or m2m_100["cfg"]["model"]
    state_dict = m2m_100["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    config = M2M100Config(
        vocab_size=vocab_size,
        max_position_embeddings=1024,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        encoder_layerdrop=args.encoder_layerdrop,
        decoder_layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
    )

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = M2M100ForConditionalGeneration(config)
    model.model.load_state_dict(state_dict, strict=False)
    model.lm_head = make_linear_from_emb(model.model.shared)

    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_m2m100_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
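
# Typical invocation (script name and paths below are placeholders):
#   python convert_m2m100_original_checkpoint_to_pytorch.py /path/to/m2m100.pt ./m2m100-hf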
from __future__ import annotations
def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
    """Apply Ohm's law (V = I * R) to solve for whichever quantity is passed as 0."""
    if (voltage, current, resistance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance < 0:
        raise ValueError("Resistance cannot be negative")
    if voltage == 0:
        return {"voltage": float(current * resistance)}
    elif current == 0:
        return {"current": voltage / resistance}
    elif resistance == 0:
        return {"resistance": voltage / current}
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
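
    # Illustrative calls (hand-checked against V = I * R; values are arbitrary):
    print(ohms_law(voltage=10, current=5, resistance=0))  # {'resistance': 2.0}
    print(ohms_law(voltage=0, current=1.5, resistance=2))  # {'voltage': 3.0}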
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
__a = "3"
print("Python version:", sys.version)
print("transformers version:", transformers.__version__)
try:
    import torch

    print("Torch version:", torch.__version__)
    print("Cuda available:", torch.cuda.is_available())
    print("Cuda version:", torch.version.cuda)
    print("CuDNN version:", torch.backends.cudnn.version())
    print("Number of GPUs available:", torch.cuda.device_count())
    print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
    print("Torch version:", None)

try:
    import deepspeed

    print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
    print("DeepSpeed version:", None)

try:
    import tensorflow as tf

    print("TensorFlow version:", tf.__version__)
    print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
    print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
    print("TensorFlow version:", None)
import pprint
import requests
API_ENDPOINT_URL = "https://zenquotes.io/api"


def quote_of_the_day() -> list:
    return requests.get(API_ENDPOINT_URL + "/today").json()


def random_quotes() -> list:
    return requests.get(API_ENDPOINT_URL + "/random").json()


if __name__ == "__main__":
    response = random_quotes()
    pprint.pprint(response)
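
    # Per the zenquotes.io docs, each endpoint returns a list of quote objects
    # shaped like {"q": <quote>, "a": <author>, "h": <HTML rendering>}
    # (illustrative structure, not captured output).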
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    StableDiffusionSAGPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionSAGPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionSAGPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": ".",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 1.0,
            "sag_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusionSAGPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_1(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np"
        )
        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2
    def test_stable_diffusion_2(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np"
        )
        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2
    def test_stable_diffusion_2_non_square(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt],
            width=768,
            height=512,
            generator=generator,
            guidance_scale=7.5,
            sag_scale=1.0,
            num_inference_steps=20,
            output_type="np",
        )

        image = output.images

        assert image.shape == (1, 512, 768, 3)
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
    from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))
class MobileNetVaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        tf_padding=True,
        hidden_act="relu6",
        last_hidden_size=1280,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.tf_padding = tf_padding
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.last_hidden_size = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier)
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileNetVaConfig(
            num_channels=self.num_channels,
            image_size=self.image_size,
            depth_multiplier=self.depth_multiplier,
            depth_divisible_by=self.depth_divisible_by,
            min_depth=self.min_depth,
            expand_ratio=self.expand_ratio,
            output_stride=self.output_stride,
            first_layer_is_expansion=self.first_layer_is_expansion,
            finegrained_output=self.finegrained_output,
            hidden_act=self.hidden_act,
            tf_padding=self.tf_padding,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.last_hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetVaForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileNetVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileNetVaModel,
            "image-classification": MobileNetVaForImageClassification,
            "image-segmentation": MobileNetVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileNetVaModelTester(self)
        self.config_tester = MobileNetVaConfigTester(self, config_class=MobileNetVaConfig, has_text_modality=False)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileNetV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileNetV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileNetV2 does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 16
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class MobileNetVaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224") if is_vision_available() else None
        )
    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1001))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.2445, -1.1993, 0.1905]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileNetVaForSemanticSegmentation.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")
        model = model.to(torch_device)

        image_processor = MobileNetVaImageProcessor.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 65, 65))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
                [[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
                [[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
    from deepspeed import DeepSpeedEngine

if is_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm
def is_compiled_module(module):
    """Check whether the module was compiled with torch.compile()."""
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)


def extract_model_from_parallel(model, keep_fp32_wrapper=True):
    """Extract a model from its distributed or compiled wrappers."""
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)

    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod

    if is_deepspeed_available():
        options += (DeepSpeedEngine,)

    while isinstance(model, options):
        model = model.module

    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model, "_converted_to_transformer_engine", False):
            convert_model(model, to_transformer_engine=False)

    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model

    return model


def wait_for_everyone():
    """Introduces a blocking point, making sure all processes have reached it before continuing."""
    PartialState().wait_for_everyone()


def save(obj, f):
    """Save the object to a file, on the main process only."""
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)


@contextmanager
def patch_environment(**kwargs):
    """Context manager that upper-cases the keys, sets them in os.environ, and removes them on exit."""
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)

    yield

    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]


def get_pretty_name(obj):
    """Return a human-readable name for a class, function, or instance."""
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)


def merge_dicts(source, destination):
    """Recursively merge `source` into `destination`."""
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value

    return destination


def is_port_in_use(port: int = None) -> bool:
    if port is None:
        port = 29500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
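

# Minimal usage sketch of patch_environment above (values are placeholders):
# keys are upper-cased, exported for the duration of the block, and removed
# again on exit.
#
#   with patch_environment(master_addr="127.0.0.1", master_port="29500"):
#       assert os.environ["MASTER_ADDR"] == "127.0.0.1"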
"""simple docstring"""
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.array:
    """Helper function to read an audio file through ffmpeg."""
    ar = f"{sampling_rate}"
    ac = "1"
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]

    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio
def ffmpeg_microphone(
    sampling_rate: int,
    chunk_length_s: float,
    format_for_conversion: str = "f32le",
):
    """Helper function to stream raw microphone data through ffmpeg."""
    ar = f"{sampling_rate}"
    ac = "1"
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    system = platform.system()
    if system == "Linux":
        format_ = "alsa"
        input_ = "default"
    elif system == "Darwin":
        format_ = "avfoundation"
        input_ = ":0"
    elif system == "Windows":
        format_ = "dshow"
        input_ = "default"

    ffmpeg_command = [
        "ffmpeg",
        "-f",
        format_,
        "-i",
        input_,
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-fflags",
        "nobuffer",
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
    for item in iterator:
        yield item
def ffmpeg_microphone_live(
    sampling_rate: int,
    chunk_length_s: float,
    stream_chunk_s: Optional[int] = None,
    stride_length_s: Optional[Union[Tuple[float, float], float]] = None,
    format_for_conversion: str = "f32le",
):
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s

    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]

    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
    """Reads raw bytes from an iterator and yields overlapping chunks of `chunk_len` with the given stride."""
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}"
        )
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item


def _ffmpeg_stream(ffmpeg_command, buflen: int):
    """Internal function to create the generator of data through ffmpeg."""
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
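

# Minimal usage sketch of ffmpeg_read above (file name is a placeholder):
# decode an audio file's raw bytes into a float32 numpy array at 16 kHz.
#
#   with open("sample.flac", "rb") as f:
#       audio = ffmpeg_read(f.read(), 16_000)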
"""simple docstring"""
import unittest
from knapsack import greedy_knapsack as kp
class TestGreedyKnapsack(unittest.TestCase):
    def test_sorted(self):
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_negative_weight_value(self):
        self.assertRaisesRegex(ValueError, "Weight can not be negative.")

    def test_negative_profit_value(self):
        self.assertRaisesRegex(ValueError, "Profit can not be negative.")

    def test_null_max_weight(self):
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_unequal_list_length(self):
        self.assertRaisesRegex(ValueError, "The length of profit and weight must be same.")


if __name__ == "__main__":
    unittest.main()
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
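

# Illustrative usage (the "sentence" column name is hypothetical): a dataset
# whose transcription lives in a differently named column can declare
#   AutomaticSpeechRecognition(transcription_column="sentence")
# and have `column_mapping` report how to rename its columns.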
"""simple docstring"""
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    """Finds a root of `function` starting from `starting_point` by the Newton-Raphson method."""
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))

    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(
                prev_guess
            )
        else:
            raise ZeroDivisionError("Could not find root") from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess

        prev_guess = next_guess
# Let's Execute
if __name__ == "__main__":
    # Find root of trigonometric function
    # Find value of pi
    print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")
    # Find root of polynomial
    # Find fourth Root of 5
    print(f"The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 + 5j)}")
    # Find value of e
    print(
        "The root of log(y) - 1 = 0 is ",
        f"{newton_raphson('log(y) - 1', 2, variable='y')}",
    )
    # Exponential Roots
    print(
        "The root of exp(x) - 1 = 0 is",
        f"{newton_raphson('exp(x) - 1', 10, precision=0.005)}",
    )
    # Find root of cos(x)
    print(f"The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}")
"""simple docstring"""
def solution(length: int = 50) -> int:
    """Count the ways a row of `length` units can be filled with blocks of length 2 to 4 (gaps allowed)."""
    ways_number = [1] * (length + 1)

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]

    return ways_number[length]


if __name__ == "__main__":
    print(f"{solution() = }")
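    # Hand-checked small case: solution(5) == 15.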
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]


def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    """Solve the linear system matrix * x = vector via Gaussian elimination."""
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    row: int
    row2: int
    col: int
    col2: int
    pivot_row: int
    ratio: float

    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]

        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]
def interpolate(data_points: list[int]) -> Callable[[int], int]:
    """Build a polynomial passing through the given data points (via solve above) and return it as a callable."""
    size: int = len(data_points)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]
    coeffs: Matrix
    x_val: int
    y_val: int
    col: int

    for x_val, y_val in enumerate(data_points):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)

        vector[x_val][0] = y_val

    coeffs = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func
def question_function(variable: int) -> int:
    """The polynomial used to generate the data points."""
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )
def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    data_points = [func(x_val) for x_val in range(1, order + 1)]
    polynomials = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret: int = 0
    poly: Callable[[int], int]
    x_val: int

    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1

        ret += poly(x_val)

    return ret


if __name__ == "__main__":
    print(f"{solution() = }")
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', F'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', F'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', F'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', F'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.weight''', F'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', F'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', F'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', F'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.weight''', F'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', F'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', F'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', F'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', F'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', F'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.bias''', F'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', F'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', F'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', F'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.bias''', F'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', F'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
('''transformer.decoder.ref_point_head.layers.0.weight''', '''decoder.ref_point_head.layers.0.weight'''),
('''transformer.decoder.ref_point_head.layers.0.bias''', '''decoder.ref_point_head.layers.0.bias'''),
('''transformer.decoder.ref_point_head.layers.1.weight''', '''decoder.ref_point_head.layers.1.weight'''),
('''transformer.decoder.ref_point_head.layers.1.bias''', '''decoder.ref_point_head.layers.1.bias'''),
('''transformer.decoder.query_scale.layers.0.weight''', '''decoder.query_scale.layers.0.weight'''),
('''transformer.decoder.query_scale.layers.0.bias''', '''decoder.query_scale.layers.0.bias'''),
('''transformer.decoder.query_scale.layers.1.weight''', '''decoder.query_scale.layers.1.weight'''),
('''transformer.decoder.query_scale.layers.1.bias''', '''decoder.query_scale.layers.1.bias'''),
('''transformer.decoder.layers.0.ca_qpos_proj.weight''', '''decoder.layers.0.ca_qpos_proj.weight'''),
('''transformer.decoder.layers.0.ca_qpos_proj.bias''', '''decoder.layers.0.ca_qpos_proj.bias'''),
]
)
def rename_key(state_dict, old, new):
    """Pop a key from the state dict and re-insert its value under the new name."""
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    """Move all backbone keys under the HuggingFace `backbone.conv_encoder.model` namespace."""
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
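
# Hedged illustration (not part of the conversion flow): PyTorch's nn.MultiheadAttention
# stores q/k/v as one fused (3*d, d) `in_proj_weight`; the slicing above cuts it into
# thirds. A minimal self-check, assuming the encoder hidden size d = 256 used here:
def _demo_split_fused_qkv(d=256):
    fused = torch.randn(3 * d, d)
    q_w, k_w, v_w = fused[:d], fused[d : 2 * d], fused[-d:]
    assert q_w.shape == k_w.shape == v_w.shape == (d, d)
    return q_w, k_w, v_w
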
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the original model's weights into our Conditional DETR structure.
    """
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)

    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    logger.info(f"Converting model {model_name}...")

    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                # strip the original "conditional_detr" prefix and re-root under the base model
                state_dict["conditional_detr.model" + key[len("conditional_detr") :]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)

    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''conditional_detr_resnet50''',
type=str,
help='''Name of the CONDITIONAL_DETR model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
A_ = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
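# Example invocation (the script filename follows the transformers conversion-script
# convention and is an assumption here; the dump path is hypothetical):
#   python convert_conditional_detr_original_pytorch_checkpoint_to_pytorch.py \
#       --model_name conditional_detr_resnet50 \
#       --pytorch_dump_folder_path ./conditional_detr_resnet50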
"""simple docstring"""
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
    BertConfig,
    BertForMaskedLM,
    BertTokenizer,
    DistilBertConfig,
    DistilBertForMaskedLM,
    DistilBertTokenizer,
    GPT2Config,
    GPT2LMHeadModel,
    GPT2Tokenizer,
    RobertaConfig,
    RobertaForMaskedLM,
    RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed


MODEL_CLASSES = {
    "distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
    "roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
    "bert": (BertConfig, BertForMaskedLM, BertTokenizer),
    "gpt2": (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
}
def sanity_checks(args):
    """
    A bunch of args sanity checks to perform even starting...
    """
    assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
    assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
    if args.mlm:
        assert os.path.isfile(args.token_counts)
        assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
    else:
        assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])

    assert args.teacher_type == args.student_type or (
        args.student_type == "distilbert" and args.teacher_type == "bert"
    )
    assert os.path.isfile(args.student_config)
    if args.student_pretrained_weights is not None:
        assert os.path.isfile(args.student_pretrained_weights)

    if args.freeze_token_type_embds:
        assert args.student_type in ["roberta"]

    assert args.alpha_ce >= 0.0
    assert args.alpha_mlm >= 0.0
    assert args.alpha_clm >= 0.0
    assert args.alpha_mse >= 0.0
    assert args.alpha_cos >= 0.0
    assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False


def freeze_token_type_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main():
    parser = argparse.ArgumentParser(description="Training")
    parser.add_argument("--force", action="store_true", help="Overwrite dump_path if it already exists.")

    parser.add_argument(
        "--dump_path", type=str, required=True, help="The output directory (log, checkpoints, parameters, etc.)"
    )
    parser.add_argument(
        "--data_file", type=str, required=True,
        help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence.",
    )
    parser.add_argument(
        "--student_type", type=str, choices=["distilbert", "roberta", "gpt2"], required=True,
        help="The student type (DistilBERT, RoBERTa).",
    )
    parser.add_argument("--student_config", type=str, required=True, help="Path to the student configuration.")
    parser.add_argument(
        "--student_pretrained_weights", default=None, type=str, help="Load student initialization checkpoint."
    )
    parser.add_argument(
        "--teacher_type", choices=["bert", "roberta", "gpt2"], required=True, help="Teacher type (BERT, RoBERTa)."
    )
    parser.add_argument("--teacher_name", type=str, required=True, help="The teacher model.")

    parser.add_argument("--temperature", default=2.0, type=float, help="Temperature for the softmax temperature.")
    parser.add_argument(
        "--alpha_ce", default=0.5, type=float, help="Linear weight for the distillation loss. Must be >=0."
    )
    parser.add_argument(
        "--alpha_mlm", default=0.0, type=float,
        help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.",
    )
    parser.add_argument("--alpha_clm", default=0.5, type=float, help="Linear weight for the CLM loss. Must be >=0.")
    parser.add_argument("--alpha_mse", default=0.0, type=float, help="Linear weight of the MSE loss. Must be >=0.")
    parser.add_argument(
        "--alpha_cos", default=0.0, type=float, help="Linear weight of the cosine embedding loss. Must be >=0."
    )

    parser.add_argument(
        "--mlm", action="store_true", help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM."
    )
    parser.add_argument(
        "--mlm_mask_prop", default=0.15, type=float,
        help="Proportion of tokens for which we need to make a prediction.",
    )
    parser.add_argument("--word_mask", default=0.8, type=float, help="Proportion of tokens to mask out.")
    parser.add_argument("--word_keep", default=0.1, type=float, help="Proportion of tokens to keep.")
    parser.add_argument("--word_rand", default=0.1, type=float, help="Proportion of tokens to randomly replace.")
    parser.add_argument(
        "--mlm_smoothing", default=0.7, type=float,
        help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).",
    )
    parser.add_argument("--token_counts", type=str, help="The token counts in the data_file for MLM.")

    parser.add_argument(
        "--restrict_ce_to_mask", action="store_true",
        help="If true, compute the distillation loss only on the [MLM] prediction distribution.",
    )
    parser.add_argument(
        "--freeze_pos_embs", action="store_true",
        help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.",
    )
    parser.add_argument(
        "--freeze_token_type_embds", action="store_true",
        help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.",
    )

    parser.add_argument("--n_epoch", type=int, default=3, help="Number of passes on the whole dataset.")
    parser.add_argument("--batch_size", type=int, default=5, help="Batch size (for each process).")
    parser.add_argument(
        "--group_by_size", action="store_false",
        help="If true, group sequences that have similar length into the same batch. Default is true.",
    )

    parser.add_argument(
        "--gradient_accumulation_steps", type=int, default=50,
        help="Gradient accumulation for larger training batches.",
    )
    parser.add_argument("--warmup_prop", default=0.05, type=float, help="Linear warmup proportion.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--learning_rate", default=5e-4, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=5.0, type=float, help="Max gradient norm.")
    parser.add_argument("--initializer_range", default=0.02, type=float, help="Random initialization range.")

    parser.add_argument(
        "--fp16", action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level", type=str, default="O1",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_gpu", type=int, default=1, help="Number of GPUs in the node.")
    parser.add_argument("--local_rank", type=int, default=-1, help="Distributed training - Local rank")
    parser.add_argument("--seed", type=int, default=56, help="Random seed")

    parser.add_argument("--log_interval", type=int, default=500, help="Tensorboard logging interval.")
    parser.add_argument("--checkpoint_interval", type=int, default=4000, help="Checkpoint interval.")
    args = parser.parse_args()
    sanity_checks(args)
    # ARGS #
    init_gpu_params(args)
    set_seed(args)
    if args.is_master:
        if os.path.exists(args.dump_path):
            if not args.force:
                raise ValueError(
                    f"Serialization dir {args.dump_path} already exists, but you have not specified whether to"
                    " overwrite it. Use `--force` if you want to overwrite it."
                )
            else:
                shutil.rmtree(args.dump_path)

        if not os.path.exists(args.dump_path):
            os.makedirs(args.dump_path)
        logger.info(f"Experiment will be dumped and logged in {args.dump_path}")

        # SAVE PARAMS #
        logger.info(f"Param: {args}")
        with open(os.path.join(args.dump_path, "parameters.json"), "w") as f:
            json.dump(vars(args), f, indent=4)
        git_log(args.dump_path)
    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]

    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f"Special tokens {special_tok_ids}")
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]

    # DATA LOADER #
    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    if args.mlm:
        logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)")
        with open(args.token_counts, "rb") as fp:
            counts = pickle.load(fp)

        token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs)
    else:
        token_probs = None

    train_lm_seq_dataset = LmSeqsDataset(params=args, data=data)
    logger.info("Data loader created.")

    # STUDENT #
    logger.info(f"Loading student config from {args.student_config}")
    stu_architecture_config = student_config_class.from_pretrained(args.student_config)
    stu_architecture_config.output_hidden_states = True

    if args.student_pretrained_weights is not None:
        logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}")
        student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config)
    else:
        student = student_model_class(stu_architecture_config)

    if args.n_gpu > 0:
        student.to(f"cuda:{args.local_rank}")
    logger.info("Student loaded.")

    # TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True)
    if args.n_gpu > 0:
        teacher.to(f"cuda:{args.local_rank}")
    logger.info(f"Teacher loaded from {args.teacher_name}.")

    # FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(student, args)
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student, args)

    # SANITY CHECKS #
    assert student.config.vocab_size == teacher.config.vocab_size
    assert student.config.hidden_size == teacher.config.hidden_size
    assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
    if args.mlm:
        assert token_probs.size(0) == stu_architecture_config.vocab_size

    # DISTILLER #
    torch.cuda.empty_cache()
    distiller = Distiller(
        params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher
    )
    distiller.train()
    logger.info("Let's go get some drinks.")
if __name__ == "__main__":
main()
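# Example launch (hypothetical file paths; flags mirror the argparse definitions above):
#   python train.py --student_type distilbert --student_config student_config.json \
#       --teacher_type bert --teacher_name bert-base-uncased \
#       --mlm --alpha_mlm 0.5 --alpha_ce 0.5 --alpha_clm 0.0 \
#       --dump_path serialization_dir/my_distillation \
#       --data_file data/binarized_text.pickle --token_counts data/token_counts.pickle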
demo_graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}
def bfs_shortest_path(graph: dict, start, goal) -> list:
    """Find the shortest path between `start` and `goal` nodes with breadth-first search."""
    # keep track of explored nodes
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]

    # return path if start is goal
    if start == goal:
        return [start]

    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path

            # mark node as explored
            explored.add(node)

    # in case there's no path between the 2 nodes
    return []
def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    """Find the shortest-path distance (edge count) between `start` and `target` nodes."""
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = {start}
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = dist[node] if dist[target] == -1 else min(dist[target], dist[node])
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, '''G''', '''D''')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, '''G''', '''D''')) # returns 4
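# Performance note: list.pop(0) is O(n), so each dequeue scans the remaining queue. For
# large graphs, collections.deque gives O(1) pops; a minimal variant of the traversal loop
# (same semantics, only the queue type changes):
#
#   from collections import deque
#   queue = deque([[start]])
#   while queue:
#       path = queue.popleft()
#       ...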
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    def forward(self, input_ids, token_type_ids, attention_mask):
        return None


class FuncNonContiguousArgs:
    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        return None


class OnnxExportTestCase(unittest.TestCase):
    MODEL_TO_TEST = [
        # (model_name, model_kwargs)
        ("bert-base-cased", {}),
        ("gpt2", {"use_cache": False}),  # We don't support exporting GPT2 past keys anymore
    ]
    @require_tf
    @slow
    def test_export_tensorflow(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "tf", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "pt", 12, **model_kwargs)
    @require_torch
    @slow
    def test_export_custom_bert_model(self):
        from transformers import BertModel

        vocab = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
        with NamedTemporaryFile(mode="w+t") as vocab_file:
            vocab_file.write("\n".join(vocab))
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name)

        with TemporaryDirectory() as bert_save_dir:
            model = BertModel(BertConfig(vocab_size=len(vocab)))
            model.save_pretrained(bert_save_dir)
            self._test_export(bert_save_dir, "pt", 12, tokenizer)
    @require_tf
    @slow
    def test_quantize_tf(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "tf", 12, **model_kwargs)
            quantized_path = quantize(Path(path))

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    @require_torch
    @slow
    def test_quantize_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "pt", 12, **model_kwargs)
            quantized_path = quantize(path)

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")
    def _test_export(self, model, framework, opset, tokenizer=None, **model_kwargs):
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir).joinpath("model.onnx")

            # Remove folder if exists
            if path.parent.exists():
                path.parent.rmdir()

            # Export
            convert(framework, model, path, opset, tokenizer, **model_kwargs)

            return path
        except Exception as e:
            self.fail(e)
    @require_torch
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_pytorch(self):
        from transformers import BertModel

        model = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "pt")

    @require_tf
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_tf(self):
        from transformers import TFBertModel

        model = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "tf")
    def _test_infer_dynamic_axis(self, model, tokenizer, framework):
        nlp = FeatureExtractionPipeline(model, tokenizer)

        variable_names = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
        input_vars, output_vars, shapes, tokens = infer_shapes(nlp, framework)

        # Assert all variables are present
        self.assertEqual(len(shapes), len(variable_names))
        self.assertTrue(all(var_name in shapes for var_name in variable_names))
        self.assertSequenceEqual(variable_names[:3], input_vars)
        self.assertSequenceEqual(variable_names[3:], output_vars)

        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name], {0: "batch", 1: "sequence"})

        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes["output_0"], {0: "batch", 1: "sequence"})
        self.assertDictEqual(shapes["output_1"], {0: "batch"})
    def test_ensure_valid_input(self):
        # All generated args are valid
        input_names = ["input_ids", "attention_mask", "token_type_ids"]
        tokens = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
        ordered_input_names, inputs_args = ensure_valid_input(FuncContiguousArgs(), tokens, input_names)

        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args), 3)

        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names), set(input_names))

        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args, (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]))

        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names, inputs_args = ensure_valid_input(FuncNonContiguousArgs(), tokens, input_names)

        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(inputs_args), 1)
        self.assertEqual(len(ordered_input_names), 1)

        # Should have only "input_ids"
        self.assertEqual(inputs_args[0], tokens["input_ids"])
        self.assertEqual(ordered_input_names[0], "input_ids")
    def test_generate_identified_filename(self):
        generated = generate_identified_filename(Path("/home/something/my_fake_model.onnx"), "-test")
        self.assertEqual("/home/something/my_fake_model-test.onnx", generated.as_posix())
""" Speech2Text2 model configuration"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/s2t-wav2vec2-large-en-de": (
        "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class Speech2Text2Config(PretrainedConfig):
    """
    Configuration class for the Speech2Text2 decoder. Instantiating a configuration with the
    defaults yields a configuration similar to the facebook/s2t-wav2vec2-large-en-de architecture.
    """

    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(self, vocab_size=10_000, decoder_layers=6, decoder_ffn_dim=2_048, decoder_attention_heads=4,
                 decoder_layerdrop=0.0, use_cache=True, activation_function="relu", d_model=256, dropout=0.1,
                 attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2,
                 scale_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2,
                 max_target_positions=1_024, **kwargs):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions

        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id,
                         decoder_start_token_id=decoder_start_token_id, **kwargs)
def solution(n: int = 100) -> int:
    """Returns the difference between the square of the sum and the sum of the squares
    of the first `n` natural numbers."""
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares
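
# Worked check (numbers from the Project Euler #6 statement): for n = 10 the square of the
# sum is 55**2 = 3025 and the sum of the squares is 385, so the difference is 2640.
assert solution(10) == 2640
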
if __name__ == "__main__":
print(f'{solution() = }')
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_CITATION = '''\
'''
_DESCRIPTION = '''
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
'''
_KWARGS_DESCRIPTION = '''
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to \'cuda\' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric("perplexity")
>>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]
>>> results = perplexity.compute(model_id=\'gpt2\',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
[\'perplexities\', \'mean_perplexity\']
>>> print(round(results["mean_perplexity"], 2))
78.22
>>> print(round(results["perplexities"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric("perplexity")
>>> input_texts = datasets.load_dataset("wikitext",
... "wikitext-2-raw-v1",
... split="test")["text"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!=\'\']
>>> results = perplexity.compute(model_id=\'gpt2\',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
[\'perplexities\', \'mean_perplexity\']
>>> print(round(results["mean_perplexity"], 2))
60.35
>>> print(round(results["perplexities"][0], 2))
81.12
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Perplexity(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "input_texts": datasets.Value("string"),
                }
            ),
            reference_urls=["https://huggingface.co/docs/transformers/perplexity"],
        )

    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            perplexity_batch = torch.exp(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, f1_score  # noqa: E402 # isort:skip
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type(model_name_or_path):
    if "token" in model_name_or_path:
        return "rag_token"
    if "sequence" in model_name_or_path:
        return "rag_sequence"
    if "bart" in model_name_or_path:
        return "bart"
    return None
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    return max(metric_fn(prediction, gt) for gt in ground_truths)
def get_scores(args, preds_path, gold_data_path):
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    answers = []

    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path, sep="\t", header=None)
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list)
            answers.append(ground_truths)
    else:
        references = [line.strip() for line in open(gold_data_path, "r").readlines()]
        answers = [[reference] for reference in references]

    f1 = em = total = 0
    for prediction, ground_truths in zip(hypos, answers):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
        f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)

    em = 100.0 * em / total
    f1 = 100.0 * f1 / total

    logger.info(f"F1: {f1:.2f}")
    logger.info(f"EM: {em:.2f}")
def get_precision_at_k(args, preds_path, gold_data_path):
    k = args.k
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    references = [line.strip() for line in open(gold_data_path, "r").readlines()]

    em = total = 0
    for hypo, reference in zip(hypos, references):
        hypo_provenance = set(hypo.split("\t")[:k])
        ref_provenance = set(reference.split("\t"))
        total += 1
        em += len(hypo_provenance & ref_provenance) / k

    em = 100.0 * em / total
    logger.info(f"Precision@{k}: {em: .2f}")
def evaluate_batch_retrieval(args, rag_model, questions):
    def strip_title(title):
        if title.startswith('"'):
            title = title[1:]
        if title.endswith('"'):
            title = title[:-1]
        return title

    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions,
        return_tensors="pt",
        padding=True,
        truncation=True,
    )["input_ids"].to(args.device)

    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids)
    question_enc_pool_output = question_enc_outputs[0]

    result = rag_model.retriever(
        retriever_input_ids,
        question_enc_pool_output.cpu().detach().to(torch.float32).numpy(),
        prefix=rag_model.rag.generator.config.prefix,
        n_docs=rag_model.config.n_docs,
        return_tensors="pt",
    )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids)
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title) for title in docs["title"]]
        provenance_strings.append("\t".join(provenance))
    return provenance_strings
def evaluate_batch_e2e(args, rag_model, questions):
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions, return_tensors="pt", padding=True, truncation=True
        )

        input_ids = inputs_dict.input_ids.to(args.device)
        attention_mask = inputs_dict.attention_mask.to(args.device)
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids,
            attention_mask=attention_mask,
            num_beams=args.num_beams,
            min_length=args.min_length,
            max_length=args.max_length,
            early_stopping=False,
            num_return_sequences=1,
            bad_words_ids=[[0, 0]],  # BART likes to repeat BOS tokens, don't allow it to generate more than one
        )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs, skip_special_tokens=True)

        if args.print_predictions:
            for q, a in zip(questions, answers):
                logger.info("Q: {} - A: {}".format(q, a))

        return answers
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type", choices=["rag_sequence", "rag_token", "bart"], type=str,
        help=(
            "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
            " model_name_or_path"
        ),
    )
    parser.add_argument(
        "--index_name", default=None, choices=["exact", "compressed", "legacy"], type=str,
        help="RAG model retriever type",
    )
    parser.add_argument(
        "--index_path", default=None, type=str,
        help="Path to the retrieval index",
    )
    parser.add_argument("--n_docs", default=5, type=int, help="Number of retrieved docs")
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True,
        help="Path to pretrained checkpoints or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--eval_mode", choices=["e2e", "retrieval"], default="e2e", type=str,
        help=(
            "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
            " precision@k."
        ),
    )
    parser.add_argument("--k", default=1, type=int, help="k for the precision@k calculation")
    parser.add_argument(
        "--evaluation_set", default=None, type=str, required=True,
        help="Path to a file containing evaluation samples",
    )
    parser.add_argument(
        "--gold_data_path", default=None, type=str, required=True,
        help="Path to a tab-separated file with gold samples",
    )
    parser.add_argument(
        "--gold_data_mode", default="qa", type=str, choices=["qa", "ans"],
        help=(
            "Format of the gold data file. "
            "qa - a single line in the following format: question [tab] answer_list. "
            "ans - a single line of the gold file contains the expected answer string."
        ),
    )
    parser.add_argument(
        "--predictions_path", type=str, default="predictions.txt",
        help="Name of the predictions file, to be stored in the checkpoints directory",
    )
    parser.add_argument(
        "--eval_all_checkpoints", action="store_true",
        help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number",
    )
    parser.add_argument(
        "--eval_batch_size", default=8, type=int,
        help="Batch size per GPU/CPU for evaluation.",
    )
    parser.add_argument(
        "--recalculate", action="store_true",
        help="Recalculate predictions even if the prediction file exists",
    )
    parser.add_argument(
        "--num_beams", default=4, type=int,
        help="Number of beams to be used when generating answers",
    )
    parser.add_argument("--min_length", default=1, type=int, help="Min length of the generated answers")
    parser.add_argument("--max_length", default=50, type=int, help="Max length of the generated answers")

    parser.add_argument(
        "--print_predictions", action="store_true",
        help="If True, prints predictions while evaluating.",
    )
    parser.add_argument(
        "--print_docs", action="store_true",
        help="If True, prints docs retrieved while generating.",
    )
    args = parser.parse_args()
    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    return args
def main(args):
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path)
        assert args.model_type is not None
    if args.model_type.startswith("rag"):
        model_class = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
        model_kwargs["n_docs"] = args.n_docs
        if args.index_name is not None:
            model_kwargs["index_name"] = args.index_name
        if args.index_path is not None:
            model_kwargs["index_path"] = args.index_path
    else:
        model_class = BartForConditionalGeneration

    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )

    logger.info("Evaluate the following checkpoints: %s", checkpoints)

    score_fn = get_scores if args.eval_mode == "e2e" else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == "e2e" else evaluate_batch_retrieval

    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path) and (not args.recalculate):
            logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path))
            score_fn(args, args.predictions_path, args.gold_data_path)
            continue

        logger.info("***** Running evaluation for {} *****".format(checkpoint))
        logger.info("  Batch size = %d", args.eval_batch_size)
        logger.info("  Predictions will be stored under {}".format(args.predictions_path))

        if args.model_type.startswith("rag"):
            retriever = RagRetriever.from_pretrained(checkpoint, **model_kwargs)
            model = model_class.from_pretrained(checkpoint, retriever=retriever, **model_kwargs)
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint, **model_kwargs)
        model.to(args.device)

        with open(args.evaluation_set, "r") as eval_file, open(args.predictions_path, "w") as preds_file:
            questions = []
            for line in tqdm(eval_file):
                questions.append(line.strip())
                if len(questions) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args, model, questions)
                    preds_file.write("\n".join(answers) + "\n")
                    preds_file.flush()
                    questions = []
            if len(questions) > 0:
                answers = evaluate_batch_fn(args, model, questions)
                preds_file.write("\n".join(answers))
                preds_file.flush()

            score_fn(args, args.predictions_path, args.gold_data_path)
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = get_args()
main(args)
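# Example end-to-end evaluation run (hypothetical data paths):
#   python eval_rag.py --model_name_or_path facebook/rag-sequence-nq --model_type rag_sequence \
#       --evaluation_set test.source --gold_data_path gold_data.tsv --gold_data_mode qa \
#       --predictions_path predictions.txt --eval_mode e2e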
def count_divisors(n):
    """Count the divisors of `n` via its prime factorization: multiply (multiplicity + 1)
    over all prime factors."""
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors
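
# Quick check: 28 = 2**2 * 7, so it has (2 + 1) * (1 + 1) = 6 divisors (1, 2, 4, 7, 14, 28).
assert count_divisors(28) == 6
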
def solution():
    """Returns the first triangle number with more than five hundred divisors."""
    t_num = 1
    i = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num) > 500:
            break
    return t_num
if __name__ == "__main__":
print(solution())
"""simple docstring"""
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def get_resize_output_image_size(
    input_image: np.ndarray, output_size: Union[int, Iterable[int]], keep_aspect_ratio: bool, multiple: int
) -> Tuple[int, int]:
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple

        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple

        if x < min_val:
            x = math.ceil(val / multiple) * multiple

        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size

    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)
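
# Worked example: a 480x640 (height x width) input resized toward 384x384 with
# keep_aspect_ratio=True and multiple=32 picks the height scale 0.8 (deviation 0.2 from 1,
# smaller than the width scale's 0.4), giving (384, 512): both multiples of 32.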
class DPTImageProcessor(BaseImageProcessor):
    r"""
    Constructs a DPT image processor.
    """

    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None,
                 resample: PILImageResampling = PILImageResampling.BILINEAR, keep_aspect_ratio: bool = False,
                 ensure_multiple_of: int = 1, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255,
                 do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None,
                 image_std: Optional[Union[float, List[float]]] = None, **kwargs):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(self, image: np.ndarray, size: Dict[str, int], keep_aspect_ratio: bool = False,
               ensure_multiple_of: int = 1, resample: PILImageResampling = PILImageResampling.BICUBIC,
               data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        """Resize an image to `(size["height"], size["width"])`, optionally keeping the aspect
        ratio and rounding each dimension to a multiple of `ensure_multiple_of`."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = get_resize_output_image_size(
            image,
            output_size=(size["height"], size["width"]),
            keep_aspect_ratio=keep_aspect_ratio,
            multiple=ensure_multiple_of,
        )
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float],
                data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        """Rescale an image by a scale factor, e.g. 1 / 255 to map pixel values into [0, 1]."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]],
                  data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        """Normalize an image channel-wise with the given mean and standard deviation."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: bool = None, size: int = None,
                   keep_aspect_ratio: bool = None, ensure_multiple_of: int = None,
                   resample: PILImageResampling = None, do_rescale: bool = None, rescale_factor: float = None,
                   do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None,
                   image_std: Optional[Union[float, List[float]]] = None,
                   return_tensors: Optional[Union[str, TensorType]] = None,
                   data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes=None):
        """Convert the raw logits of `DPTForSemanticSegmentation` into per-image segmentation maps."""
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
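
# Usage sketch (assumes `img` is a PIL.Image; the defaults resize to 384x384):
#
#   processor = DPTImageProcessor()
#   batch = processor(images=img, return_tensors="pt")
#   batch["pixel_values"].shape  # torch.Size([1, 3, 384, 384])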
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_clipseg": [
        "CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPSegConfig",
        "CLIPSegTextConfig",
        "CLIPSegVisionConfig",
    ],
    "processing_clipseg": ["CLIPSegProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_clipseg"] = [
        "CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CLIPSegModel",
        "CLIPSegPreTrainedModel",
        "CLIPSegTextModel",
        "CLIPSegVisionModel",
        "CLIPSegForImageSegmentation",
    ]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
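# The _LazyModule registered above defers the heavy torch-backed imports until an attribute
# is first accessed, so e.g. the following only loads modeling code on use:
#
#   from transformers.models.clipseg import CLIPSegProcessor  # resolved lazily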
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
    "tokenization_cpmant": ["CpmAntTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_cpmant"] = [
        "CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CpmAntForCausalLM",
        "CpmAntModel",
        "CpmAntPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 664
| 1
|
import math
import random


def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Return the sigmoid of value, or its derivative when deriv is True."""
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value
INITIAL_VALUE = 0.02


def forward_propagation(expected: int, number_propagations: int) -> float:
    """Train a single weight towards expected / 100 and return the final prediction."""
    # Random weight in the interval (-1, 1)
    weight = float(2 * (random.randint(1, 100)) - 1)

    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta

    return layer_1 * 100


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    expected = int(input("Expected value: "))
    number_propagations = int(input("Number of propagations: "))
    print(forward_propagation(expected, number_propagations))
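    # Quick sanity checks (mathematical facts, not from the source): sigmoid_function(0.0)
    # is 0.5, and the derivative form takes the *activation* value as input, so
    # sigmoid_function(0.5, deriv=True) == 0.5 * (1 - 0.5) == 0.25.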
| 520
|
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True

    @slow
    def test_sequence_builders(self):
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
| 520
| 1
|
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def fill_mask(masked_input, model, tokenizer, topk=5):
    # Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))]
    )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
    return topk_filled_outputs
tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()

masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
| 372
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json""",
"""junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json""",
"""junnyu/roformer_chinese_char_small""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"""
),
"""junnyu/roformer_chinese_char_base""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"""
),
"""junnyu/roformer_small_discriminator""": (
"""https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"""
),
"""junnyu/roformer_small_generator""": (
"""https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"""
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class RoFormerConfig(PretrainedConfig):
    model_type = "roformer"

    def __init__(
        self,
        vocab_size=50_000,
        embedding_size=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1536,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        rotary_value=False,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache


class RoFormerOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
| 372
| 1
|
from pathlib import Path
import fire
def minify(src_dir: str, dest_dir: str, n: int):
    """Write the first n lines of each file in src_dir to dest_dir."""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new))
if __name__ == "__main__":
fire.Fire(minify)
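    # Example invocation (hypothetical paths), since fire.Fire exposes minify as a CLI:
    #   python minify.py data/full data/mini 100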
| 6
|
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class IsSafetensorsCompatibleTests(unittest.TestCase):
    def test_all_is_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_compatible(self):
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformer_model_is_compatible(self):
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_transformer_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            # Removed: 'text_encoder/model.safetensors',
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_all_is_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant(self):
        filenames = [
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            # 'text_encoder/model.fp16.safetensors',
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
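
# A rough sketch of the rule the tests above exercise, inferred from their expected
# results rather than taken from the real diffusers implementation: every folder that
# ships a PyTorch ".bin" weight must also ship some ".safetensors" file.
def sketch_is_safetensors_compatible(filenames):
    bin_folders = {f.rsplit("/", 1)[0] for f in filenames if f.endswith(".bin")}
    safetensors_folders = {f.rsplit("/", 1)[0] for f in filenames if f.endswith(".safetensors")}
    return bin_folders <= safetensors_folders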
| 6
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_nezha''': ['''NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''NezhaConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nezha"] = [
'''NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''NezhaForNextSentencePrediction''',
'''NezhaForMaskedLM''',
'''NezhaForPreTraining''',
'''NezhaForMultipleChoice''',
'''NezhaForQuestionAnswering''',
'''NezhaForSequenceClassification''',
'''NezhaForTokenClassification''',
'''NezhaModel''',
'''NezhaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 404
|
"""simple docstring"""
from __future__ import annotations
def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    """Swap array[index1] and array[index2] if they are out of order for direction."""
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]


def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print('''\nSorted array in ascending order is: ''', end='''''')
print(*unsorted, sep=''', ''')
bitonic_merge(unsorted, 0, len(unsorted), 0)
print('''Sorted array in descending order is: ''', end='''''')
print(*unsorted, sep=''', ''')
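    # Note: bitonic sort requires the input length to be a power of two.
    # Illustrative run (values chosen here, not from the source):
    #   data = [12, 42, -21, 17, 23, 18, 9, -5]   # 8 = 2**3 elements
    #   bitonic_sort(data, 0, len(data), 1)
    #   data                                      # [-21, -5, 9, 12, 17, 18, 23, 42]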
| 404
| 1
|
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ResNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return
    @unittest.skip(reason="ResNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ResNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFResNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
| 303
|
from __future__ import annotations
import time
Path = list[tuple[int, int]]
grid = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
class Node:
    def __init__(self, pos_x, pos_y, goal_x, goal_y, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent


class BreadthFirstSearch:
    def __init__(self, start, goal):
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)

        self.node_queue = [self.start]
        self.reached = False

    def search(self) -> Path | None:
        while self.node_queue:
            current_node = self.node_queue.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            successors = self.get_successors(current_node)

            for node in successors:
                self.node_queue.append(node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent)
            )
        return successors

    def retrace_path(self, node: Node) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalBreadthFirstSearch:
    def __init__(self, start, goal):
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self) -> Path | None:
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node

            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }

            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)

        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> Path:
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    path = bfs.search()
    bfs_time = time.time() - start_bfs_time

    print("Unidirectional BFS computation time : ", bfs_time)

    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time

    print("Bidirectional BFS computation time : ", bd_bfs_time)
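    # Performance note (suggestion, not part of the original): list.pop(0) is O(n); a
    # drop-in alternative is collections.deque, whose popleft() is O(1), e.g.
    #   from collections import deque
    #   node_queue = deque([start_node]); current = node_queue.popleft()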
| 303
| 1
|
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Leave None if you want to train a model from"
                " scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
@dataclass
class DataTrainingArguments:
    train_data_file: Optional[str] = field(
        default=None, metadata={"help": "The input training data file (a text file)."}
    )
    train_data_files: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The input training data files (multiple files in glob format). "
                "Very often splitting large files to smaller files can prevent tokenizer going out of memory"
            )
        },
    )
    eval_data_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word mask in Chinese."},
    )
    eval_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."},
    )
    line_by_line: bool = field(
        default=False,
        metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
    )
    mlm: bool = field(
        default=False, metadata={"help": "Train with masked-language modeling loss instead of language modeling."}
    )
    whole_word_mask: bool = field(default=False, metadata={"help": "Whether ot not to use whole word mask."})
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    plm_probability: float = field(
        default=1 / 6,
        metadata={
            "help": (
                "Ratio of length of a span of masked tokens to surrounding context length for permutation language"
                " modeling."
            )
        },
    )
    max_span_length: int = field(
        default=5, metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."}
    )
    block_size: int = field(
        default=-1,
        metadata={
            "help": (
                "Optional input sequence length after tokenization."
                "The training dataset will be truncated in block of this size for training."
                "Default to the model max input length for single sentence inputs (take into account special tokens)."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def get_dataset(
    args: DataTrainingArguments,
    tokenizer: PreTrainedTokenizer,
    evaluate: bool = False,
    cache_dir: Optional[str] = None,
):
    def _dataset(file_path, ref_path=None):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError("You need to set whole word masking and mlm to True for Chinese Whole Word Mask")
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer,
                    file_path=file_path,
                    block_size=args.block_size,
                    ref_path=ref_path,
                )
            return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size)
        else:
            return TextDataset(
                tokenizer=tokenizer,
                file_path=file_path,
                block_size=args.block_size,
                overwrite_cache=args.overwrite_cache,
                cache_dir=cache_dir,
            )

    if evaluate:
        return _dataset(args.eval_data_file, args.eval_ref_file)
    elif args.train_data_files:
        return ConcatDataset([_dataset(f) for f in glob(args.train_data_files)])
    else:
        return _dataset(args.train_data_file, args.train_ref_file)
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if data_args.eval_data_file is None and training_args.do_eval:
        raise ValueError(
            "Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file "
            "or remove the --do_eval argument."
        )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")

    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"
            " script, save it,and load it from here, using --tokenizer_name"
        )

    if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelWithLMHead.from_config(config)

    model.resize_token_embeddings(len(tokenizer))
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
'''BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the'''
'''--mlm flag (masked language modeling).''' )
    if data_args.block_size <= 0:
        # Our input block size will be the max possible for the model
        data_args.block_size = tokenizer.max_len
    else:
        data_args.block_size = min(data_args.block_size, tokenizer.max_len)

    # Get datasets
    train_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir) if training_args.do_train else None
    )
    eval_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, evaluate=True, cache_dir=model_args.cache_dir)
        if training_args.do_eval
        else None
    )
    if config.model_type == "xlnet":
        data_collator = DataCollatorForPermutationLanguageModeling(
            tokenizer=tokenizer,
            plm_probability=data_args.plm_probability,
            max_span_length=data_args.max_span_length,
        )
    else:
        if data_args.mlm and data_args.whole_word_mask:
            data_collator = DataCollatorForWholeWordMask(
                tokenizer=tokenizer, mlm_probability=data_args.mlm_probability
            )
        else:
            data_collator = DataCollatorForLanguageModeling(
                tokenizer=tokenizer, mlm=data_args.mlm, mlm_probability=data_args.mlm_probability
            )

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        data_collator=data_collator,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        prediction_loss_only=True,
    )

    # Training
    if training_args.do_train:
        model_path = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path)
            else None
        )
        trainer.train(model_path=model_path)
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)
    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        result = {"perplexity": perplexity}

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_lm.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key in sorted(result.keys()):
                    logger.info("  %s = %s", key, str(result[key]))
                    writer.write("%s = %s\n" % (key, str(result[key])))

        results.update(result)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
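    # Example invocation (hypothetical file names; the flags come from the dataclasses above):
    #   python run_language_modeling.py \
    #       --model_name_or_path distilbert-base-uncased --mlm \
    #       --do_train --train_data_file train.txt \
    #       --do_eval --eval_data_file eval.txt \
    #       --output_dir ./lm_out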
| 212
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_mobilevit_config(mobilevit_name):
    config = MobileViTConfig()

    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        config.hidden_sizes = [144, 192, 240]
        config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in mobilevit_name:
        config.hidden_sizes = [96, 120, 144]
        config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in mobilevit_name:
        config.hidden_sizes = [64, 80, 96]
        config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
        config.hidden_dropout_prob = 0.05
        config.expand_ratio = 2.0

    if mobilevit_name.startswith("deeplabv3_"):
        config.image_size = 512
        config.output_stride = 16
        config.num_labels = 21
        filename = "pascal-voc-id2label.json"
    else:
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(name, base_model=False):
    for i in range(1, 6):
        if f"layer_{i}." in name:
            name = name.replace(f"layer_{i}.", f"encoder.layer.{i - 1}.")

    if "conv_1." in name:
        name = name.replace("conv_1.", "conv_stem.")
    if ".block." in name:
        name = name.replace(".block.", ".")
    if "exp_1x1" in name:
        name = name.replace("exp_1x1", "expand_1x1")
    if "red_1x1" in name:
        name = name.replace("red_1x1", "reduce_1x1")
    if ".local_rep.conv_3x3." in name:
        name = name.replace(".local_rep.conv_3x3.", ".conv_kxk.")
    if ".local_rep.conv_1x1." in name:
        name = name.replace(".local_rep.conv_1x1.", ".conv_1x1.")
    if ".norm." in name:
        name = name.replace(".norm.", ".normalization.")
    if ".conv." in name:
        name = name.replace(".conv.", ".convolution.")
    if ".conv_proj." in name:
        name = name.replace(".conv_proj.", ".conv_projection.")

    for i in range(0, 2):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.layer.{j}.")

    for i in range(2, 6):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.")
                if "expand_1x1" in name:
                    name = name.replace("expand_1x1", "downsampling_layer.expand_1x1")
                if "conv_3x3" in name:
                    name = name.replace("conv_3x3", "downsampling_layer.conv_3x3")
                if "reduce_1x1" in name:
                    name = name.replace("reduce_1x1", "downsampling_layer.reduce_1x1")

    for i in range(2, 5):
        if f".global_rep.{i}.weight" in name:
            name = name.replace(f".global_rep.{i}.weight", ".layernorm.weight")
        if f".global_rep.{i}.bias" in name:
            name = name.replace(f".global_rep.{i}.bias", ".layernorm.bias")

    if ".global_rep." in name:
        name = name.replace(".global_rep.", ".transformer.")
    if ".pre_norm_mha.0." in name:
        name = name.replace(".pre_norm_mha.0.", ".layernorm_before.")
    if ".pre_norm_mha.1.out_proj." in name:
        name = name.replace(".pre_norm_mha.1.out_proj.", ".attention.output.dense.")
    if ".pre_norm_ffn.0." in name:
        name = name.replace(".pre_norm_ffn.0.", ".layernorm_after.")
    if ".pre_norm_ffn.1." in name:
        name = name.replace(".pre_norm_ffn.1.", ".intermediate.dense.")
    if ".pre_norm_ffn.4." in name:
        name = name.replace(".pre_norm_ffn.4.", ".output.dense.")
    if ".transformer." in name:
        name = name.replace(".transformer.", ".transformer.layer.")
    if ".aspp_layer." in name:
        name = name.replace(".aspp_layer.", ".")
    if ".aspp_pool." in name:
        name = name.replace(".aspp_pool.", ".")
    if "seg_head." in name:
        name = name.replace("seg_head.", "segmentation_head.")
    if "segmentation_head.classifier.classifier." in name:
        name = name.replace("segmentation_head.classifier.classifier.", "segmentation_head.classifier.")
    if "classifier.fc." in name:
        name = name.replace("classifier.fc.", "classifier.")
    elif (not base_model) and ("segmentation_head." not in name):
        name = "mobilevit." + name

    return name
def convert_state_dict(orig_state_dict, model, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevit."

    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if key[:8] == "encoder.":
            key = key[8:]

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[0][6:]) - 1
            transformer_num = int(key_split[3])
            layer = model.get_submodule(f"{model_prefix}encoder.layer.{layer_num}")
            dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            prefix = (
                f"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."
            )
            if "weight" in key:
                orig_state_dict[prefix + "query.weight"] = val[:dim, :]
                orig_state_dict[prefix + "key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[prefix + "value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[prefix + "query.bias"] = val[:dim]
                orig_state_dict[prefix + "key.bias"] = val[dim : dim * 2]
                orig_state_dict[prefix + "value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, base_model)] = val

    return orig_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(mobilevit_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_mobilevit_config(mobilevit_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    # load 🤗 model
    if mobilevit_name.startswith("deeplabv3_"):
        model = MobileViTForSemanticSegmentation(config).eval()
    else:
        model = MobileViTForImageClassification(config).eval()

    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits
    if mobilevit_name.startswith("deeplabv3_"):
        assert logits.shape == (1, 21, 32, 32)

        if mobilevit_name == "deeplabv3_mobilevit_s":
            expected_logits = torch.tensor(
                [
                    [[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
                    [[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
                    [[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xs":
            expected_logits = torch.tensor(
                [
                    [[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
                    [[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
                    [[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            expected_logits = torch.tensor(
                [
                    [[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
                    [[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
                    [[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
                ]
            )
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3, :3, :3], expected_logits, atol=1e-4)
    else:
        assert logits.shape == (1, 1000)

        if mobilevit_name == "mobilevit_s":
            expected_logits = torch.tensor([-0.9866, 0.2392, -1.1241])
        elif mobilevit_name == "mobilevit_xs":
            expected_logits = torch.tensor([-2.4761, -0.9399, -1.9587])
        elif mobilevit_name == "mobilevit_xxs":
            expected_logits = torch.tensor([-1.9364, -1.2327, -0.4653])
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {mobilevit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "mobilevit_s": "mobilevit-small",
            "mobilevit_xs": "mobilevit-x-small",
            "mobilevit_xxs": "mobilevit-xx-small",
            "deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small",
            "deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small",
            "deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[mobilevit_name]
        image_processor.push_to_hub(model_name, organization="apple")
        model.push_to_hub(model_name, organization="apple")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--mobilevit_name',
default='mobilevit_s',
type=str,
help=(
'Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','
' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'
),
)
parser.add_argument(
'--checkpoint_path', required=True, type=str, help='Path to the original state dict (.pt file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
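    # Example invocation (hypothetical script name and checkpoint path; the flags are the
    # ones defined by the argparse setup above):
    #   python convert_mobilevit_original_to_pytorch.py \
    #       --mobilevit_name mobilevit_s \
    #       --checkpoint_path ./mobilevit_s.pt \
    #       --pytorch_dump_folder_path ./mobilevit-small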
| 212
| 1
|
import numpy as np
from cv2 import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image

from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs

img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)


def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"
    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)
    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))

    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)
    assert lbp_image.any()
| 504
|
import numpy
class TwoHiddenLayerNeuralNetwork:
    def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> None:
        self.input_array = input_array
        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.
        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1], 4
        )
        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(4, 3)
        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)
        # Real output values provided.
        self.output_array = output_array
        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)

    def feedforward(self) -> numpy.ndarray:
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )
        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return self.layer_between_second_hidden_layer_and_output

    def back_propagation(self) -> None:
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(
                self.layer_between_first_hidden_layer_and_second_hidden_layer
            ),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(
                    self.layer_between_first_hidden_layer_and_second_hidden_layer
                ),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )

        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )

    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"Iteration {iteration} Loss: {loss}")

    def predict(self, input_arr: numpy.ndarray) -> int:
        self.array = input_arr
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return int(self.layer_between_second_hidden_layer_and_output > 0.6)
def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    return (value) * (1 - (value))
def example() -> int:
    # Input values.
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )
    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)
    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(input_array=test_input, output_array=output)
    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)
    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))
if __name__ == "__main__":
example()
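    # Note: with only 10 training iterations the random weight initialisation
    # still dominates, so the prediction for [1, 1, 1] (parity 1) may come out
    # as 0 or 1; raising `iterations` inside example() makes it reliable.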
| 504
| 1
|
def is_palindrome(num: int) -> bool:
    return str(num) == str(num)[::-1]
def sum_reverse(num: int) -> int:
    return int(num) + int(str(num)[::-1])
def solution(limit: int = 10_000) -> int:
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        a = num
        while iterations < 50:
            a = sum_reverse(a)
            iterations += 1
            if is_palindrome(a):
                break
        else:
            lychrel_nums.append(num)
    return len(lychrel_nums)
if __name__ == "__main__":
print(F"""{solution() = }""")
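# Worked example: 47 is not a Lychrel number, since a single reverse-and-add step
# already yields a palindrome: sum_reverse(47) == 47 + 74 == 121.
# 196 is the best-known candidate that never seems to produce a palindrome.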
| 634
|
def is_sum_subset(arr: list[int], required_sum: int) -> bool:
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]
    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True
    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False
    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
    return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
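    # Hypothetical usage example: 9 = 4 + 5 is reachable from the array, 30 is not.
    print(is_sum_subset([3, 34, 4, 12, 5, 2], 9))  # True
    print(is_sum_subset([3, 34, 4, 12, 5, 2], 30))  # False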
| 634
| 1
|
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey
def gabor_filter_kernel(
    ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int
) -> np.ndarray:
    """Build a 2D Gabor kernel: a Gaussian envelope modulated by a cosine wave."""
    # the kernel size has to be odd so that the filter has a well-defined center
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float32)
    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2
            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)
            # get kernel x
            _x = cos_theta * px + sin_theta * py
            # get kernel y
            _y = -sin_theta * px + cos_theta * py
            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)
    return gabor
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
    # read original image
    img = imread("../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)
    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)
    imshow("Original", gray)
    imshow("Gabor filter with 20x20 mask and 6 directions", out)
waitKey(0)
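    # Parameter intuition (informal): sigma sets the width of the Gaussian
    # envelope, theta the stripe orientation in degrees, lambd the wavelength
    # of the cosine carrier, gamma its ellipticity and psi its phase offset.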
| 68
|
"""simple docstring"""
from typing import Dict
from .base import GenericTensor, Pipeline
class FeatureExtractionPipeline(Pipeline):
    """Pipeline that returns the raw hidden states of the model as features."""
    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}
        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)"
                )
            tokenize_kwargs["truncation"] = truncation
        preprocess_params = tokenize_kwargs
        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors
        return preprocess_params, {}, postprocess_params
    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs
    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs
    def postprocess(self, model_outputs, return_tensors=False):
        # [0] is the first available tensor, logits or last_hidden_state.
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()
    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
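# Hypothetical usage sketch (model checkpoint is illustrative):
#
#     from transformers import pipeline
#
#     extractor = pipeline(task="feature-extraction", model="bert-base-cased")
#     features = extractor("This is a simple test.", return_tensors=True)
#     # -> tensor of shape (1, sequence_length, hidden_size)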
| 180
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb"] = ["NllbTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
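# Note: at runtime the module object is swapped for a _LazyModule, so the heavy
# optional dependencies (sentencepiece, tokenizers) are only imported when one of
# the exported names, e.g. NllbTokenizer, is actually accessed.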
| 26
|
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
    CONFIG_MAPPING,
    FEATURE_EXTRACTOR_MAPPING,
    PROCESSOR_MAPPING,
    TOKENIZER_MAPPING,
    AutoConfig,
    AutoFeatureExtractor,
    AutoProcessor,
    AutoTokenizer,
    BertTokenizer,
    ProcessorMixin,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
SAMPLE_PROCESSOR_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_VOCAB = get_tests_dir("fixtures/vocab.json")
SAMPLE_PROCESSOR_CONFIG_DIR = get_tests_dir("fixtures")
class AutoFeatureExtractorTest(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
    def test_processor_from_model_shortcut(self):
        processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsInstance(processor, Wav2Vec2Processor)
    def test_processor_from_local_directory_from_repo(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()
            processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")
            # save in new folder
            model_config.save_pretrained(tmpdirname)
            processor.save_pretrained(tmpdirname)
            processor = AutoProcessor.from_pretrained(tmpdirname)
        self.assertIsInstance(processor, Wav2Vec2Processor)
    def test_processor_from_local_directory_from_extractor_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # copy relevant files
            copyfile(SAMPLE_PROCESSOR_CONFIG, os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME))
            copyfile(SAMPLE_VOCAB, os.path.join(tmpdirname, "vocab.json"))
            processor = AutoProcessor.from_pretrained(tmpdirname)
        self.assertIsInstance(processor, Wav2Vec2Processor)
    def test_processor_from_feat_extr_processor_class(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            feature_extractor = Wav2Vec2FeatureExtractor()
            tokenizer = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h")
            processor = Wav2Vec2Processor(feature_extractor, tokenizer)
            # save in new folder
            processor.save_pretrained(tmpdirname)
            # drop `processor_class` in tokenizer
            with open(os.path.join(tmpdirname, TOKENIZER_CONFIG_FILE), "r") as f:
                config_dict = json.load(f)
            config_dict.pop("processor_class")
            with open(os.path.join(tmpdirname, TOKENIZER_CONFIG_FILE), "w") as f:
                f.write(json.dumps(config_dict))
            processor = AutoProcessor.from_pretrained(tmpdirname)
        self.assertIsInstance(processor, Wav2Vec2Processor)
    def test_processor_from_tokenizer_processor_class(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            feature_extractor = Wav2Vec2FeatureExtractor()
            tokenizer = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h")
            processor = Wav2Vec2Processor(feature_extractor, tokenizer)
            # save in new folder
            processor.save_pretrained(tmpdirname)
            # drop `processor_class` in feature extractor
            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "r") as f:
                config_dict = json.load(f)
            config_dict.pop("processor_class")
            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "w") as f:
                f.write(json.dumps(config_dict))
            processor = AutoProcessor.from_pretrained(tmpdirname)
        self.assertIsInstance(processor, Wav2Vec2Processor)
    def test_processor_from_local_directory_from_model_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config(processor_class="Wav2Vec2Processor")
            model_config.save_pretrained(tmpdirname)
            # copy relevant files
            copyfile(SAMPLE_VOCAB, os.path.join(tmpdirname, "vocab.json"))
            # create emtpy sample processor
            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "w") as f:
                f.write("{}")
            processor = AutoProcessor.from_pretrained(tmpdirname)
        self.assertIsInstance(processor, Wav2Vec2Processor)
    def test_from_pretrained_dynamic_processor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=False
            )
        processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor", trust_remote_code=True)
        self.assertTrue(processor.special_attribute_present)
        self.assertEqual(processor.__class__.__name__, "NewProcessor")
        feature_extractor = processor.feature_extractor
        self.assertTrue(feature_extractor.special_attribute_present)
        self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
        tokenizer = processor.tokenizer
        self.assertTrue(tokenizer.special_attribute_present)
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            # Test we can also load the slow version
            new_processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=True, use_fast=False
            )
            new_tokenizer = new_processor.tokenizer
            self.assertTrue(new_tokenizer.special_attribute_present)
            self.assertEqual(new_tokenizer.__class__.__name__, "NewTokenizer")
        else:
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
    def test_new_processor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
            AutoProcessor.register(CustomConfig, CustomProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoProcessor.register(Wav2Vec2Config, Wav2Vec2Processor)
            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)
            with tempfile.TemporaryDirectory() as tmp_dir:
                vocab_file = os.path.join(tmp_dir, "vocab.txt")
                with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                    vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
                tokenizer = CustomTokenizer(vocab_file)
            processor = CustomProcessor(feature_extractor, tokenizer)
            with tempfile.TemporaryDirectory() as tmp_dir:
                processor.save_pretrained(tmp_dir)
                new_processor = AutoProcessor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_processor, CustomProcessor)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
            if CustomConfig in PROCESSOR_MAPPING._extra_content:
                del PROCESSOR_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_processor_conflict(self):
        class NewFeatureExtractor(Wav2Vec2FeatureExtractor):
            special_attribute_present = False
        class NewTokenizer(BertTokenizer):
            special_attribute_present = False
        class NewProcessor(ProcessorMixin):
            feature_extractor_class = "AutoFeatureExtractor"
            tokenizer_class = "AutoTokenizer"
            special_attribute_present = False
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=NewTokenizer)
            AutoProcessor.register(CustomConfig, NewProcessor)
            # If remote code is not set, the default is to use local classes.
            processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor")
            self.assertEqual(processor.__class__.__name__, "NewProcessor")
            self.assertFalse(processor.special_attribute_present)
            self.assertFalse(processor.feature_extractor.special_attribute_present)
            self.assertFalse(processor.tokenizer.special_attribute_present)
            # If remote code is disabled, we load the local ones.
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=False
            )
            self.assertEqual(processor.__class__.__name__, "NewProcessor")
            self.assertFalse(processor.special_attribute_present)
            self.assertFalse(processor.feature_extractor.special_attribute_present)
            self.assertFalse(processor.tokenizer.special_attribute_present)
            # If remote is enabled, we load from the Hub.
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=True
            )
            self.assertEqual(processor.__class__.__name__, "NewProcessor")
            self.assertTrue(processor.special_attribute_present)
            self.assertTrue(processor.feature_extractor.special_attribute_present)
            self.assertTrue(processor.tokenizer.special_attribute_present)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
            if CustomConfig in PROCESSOR_MAPPING._extra_content:
                del PROCESSOR_MAPPING._extra_content[CustomConfig]
    def test_auto_processor_creates_tokenizer(self):
        processor = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-bert")
        self.assertEqual(processor.__class__.__name__, "BertTokenizerFast")
    def test_auto_processor_creates_image_processor(self):
        processor = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-convnext")
        self.assertEqual(processor.__class__.__name__, "ConvNextImageProcessor")
@is_staging_test
class ProcessorPushToHubTester(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)
    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-processor")
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-processor-org")
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-processor")
        except HTTPError:
            pass
    def test_push_to_hub(self):
        processor = Wav2Vec2Processor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)
        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(tmp_dir, "test-processor"), push_to_hub=True, use_auth_token=self._token
            )
            new_processor = Wav2Vec2Processor.from_pretrained(f"{USER}/test-processor")
            for k, v in processor.feature_extractor.__dict__.items():
                self.assertEqual(v, getattr(new_processor.feature_extractor, k))
            self.assertDictEqual(new_processor.tokenizer.get_vocab(), processor.tokenizer.get_vocab())
    def test_push_to_hub_in_organization(self):
        processor = Wav2Vec2Processor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)
        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(tmp_dir, "test-processor-org"),
                push_to_hub=True,
                use_auth_token=self._token,
                organization="valid_org",
            )
            new_processor = Wav2Vec2Processor.from_pretrained("valid_org/test-processor-org")
            for k, v in processor.feature_extractor.__dict__.items():
                self.assertEqual(v, getattr(new_processor.feature_extractor, k))
            self.assertDictEqual(new_processor.tokenizer.get_vocab(), processor.tokenizer.get_vocab())
    def test_push_to_hub_dynamic_processor(self):
        CustomFeatureExtractor.register_for_auto_class()
        CustomTokenizer.register_for_auto_class()
        CustomProcessor.register_for_auto_class()
        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = CustomTokenizer(vocab_file)
        processor = CustomProcessor(feature_extractor, tokenizer)
        with tempfile.TemporaryDirectory() as tmp_dir:
            create_repo(f"{USER}/test-dynamic-processor", token=self._token)
            repo = Repository(tmp_dir, clone_from=f"{USER}/test-dynamic-processor", token=self._token)
            processor.save_pretrained(tmp_dir)
            # This has added the proper auto_map field to the feature extractor config
            self.assertDictEqual(
                processor.feature_extractor.auto_map,
                {
                    "AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor",
                    "AutoProcessor": "custom_processing.CustomProcessor",
                },
            )
            # This has added the proper auto_map field to the tokenizer config
            with open(os.path.join(tmp_dir, "tokenizer_config.json")) as f:
                tokenizer_config = json.load(f)
            self.assertDictEqual(
                tokenizer_config["auto_map"],
                {
                    "AutoTokenizer": ["custom_tokenization.CustomTokenizer", None],
                    "AutoProcessor": "custom_processing.CustomProcessor",
                },
            )
            # The code has been copied from fixtures
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_feature_extraction.py")))
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_tokenization.py")))
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_processing.py")))
            repo.push_to_hub()
        new_processor = AutoProcessor.from_pretrained(f"{USER}/test-dynamic-processor", trust_remote_code=True)
        # Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
        self.assertEqual(new_processor.__class__.__name__, "CustomProcessor")
| 26
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"edbeeching/decision-transformer-gym-hopper-medium": (
"https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class DecisionTransformerConfig(PretrainedConfig):
    """Configuration class to store the configuration of a DecisionTransformer model."""
    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(
        self,
        state_dim=17,
        act_dim=4,
        hidden_size=128,
        max_ep_len=4_096,
        action_tanh=True,
        vocab_size=1,
        n_positions=1_024,
        n_layer=3,
        n_head=1,
        n_inner=None,
        activation_function="relu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50_256,
        eos_token_id=50_256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
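# Hypothetical usage sketch (model class name assumed from the library's naming scheme):
#
#     from transformers import DecisionTransformerConfig, DecisionTransformerModel
#
#     configuration = DecisionTransformerConfig()
#     model = DecisionTransformerModel(configuration)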
| 59
|
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils
def convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
):
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, "rb") as fp:
            corpus = pickle.load(fp, encoding="latin1")
        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
        print(f"Save vocabulary to {pytorch_vocab_dump_path}")
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)
        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop("vocab", None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
        print(f"Save dataset to {pytorch_dataset_dump_path}")
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)
    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)
        print(f"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.")
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(f"Building PyTorch model from configuration: {config}")
        model = TransfoXLLMHeadModel(config)
        model = load_tf_weights_in_transfo_xl(model, config, tf_path)
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--tf_checkpoint_path",
default="",
type=str,
help="An optional path to a TensorFlow checkpoint path to be converted.",
)
parser.add_argument(
"--transfo_xl_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--transfo_xl_dataset_file",
default="",
type=str,
help="An optional dataset file to be converted in a vocabulary.",
)
    args = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
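# Hypothetical invocation (script name and paths are illustrative):
#
#     python convert_transfo_xl_original_tf_checkpoint_to_pytorch.py \
#         --pytorch_dump_folder_path ./transfo-xl-pt \
#         --tf_checkpoint_path ./tf-ckpt/model.ckpt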
| 59
| 1
|
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class Node:
    def __init__(self, data: Any) -> None:
        self.data: Any = data
        self.next: Node | None = None
class CircularLinkedList:
    def __init__(self) -> None:
        self.head: Node | None = None
        self.tail: Node | None = None
    def __iter__(self) -> Iterator[Any]:
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break
    def __len__(self) -> int:
        return sum(1 for _ in self)
    def __repr__(self) -> str:
        return "->".join(str(item) for item in iter(self))
    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)
    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)
    def insert_nth(self, index: int, data: Any) -> None:
        if index < 0 or index > len(self):
            raise IndexError("list index out of range.")
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # insert at tail
                self.tail = new_node
    def delete_front(self) -> Any:
        return self.delete_nth(0)
    def delete_tail(self) -> Any:
        return self.delete_nth(len(self) - 1)
    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index < len(self):
            raise IndexError("list index out of range.")
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data
    def is_empty(self) -> bool:
        return len(self) == 0
def test_circular_linked_list() -> None:
    """Test cases for the CircularLinkedList class."""
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""
    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen
    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen
    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True
    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True
    assert circular_linked_list.is_empty() is True
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))
    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3
    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
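# Invariant worth noting: because the list is circular, `self.tail.next` always
# points back at `self.head`, which is what makes the iterator stop once it wraps.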
| 626
|
lowerCAmelCase__ = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"""
def base64_encode(data: bytes) -> bytes:
    """Encode `data` to a Base64-encoded bytes object."""
    if not isinstance(data, bytes):
        msg = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(msg)
    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)
    padding_needed = len(binary_stream) % 6 != 0
    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)
        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""
    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )
def base64_decode(encoded_data: str) -> bytes:
    """Decode a Base64-encoded string (or ASCII bytes) back to raw bytes."""
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg)
    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")
    padding = encoded_data.count("=")
    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."
    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"
    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )
    data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]
    return bytes(data)
if __name__ == "__main__":
import doctest
doctest.testmod()
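    # Round-trip sanity check (mirrors the standard library's base64 module):
    # base64_encode(b"Hello World!") == b"SGVsbG8gV29ybGQh"
    # base64_decode("SGVsbG8gV29ybGQh") == b"Hello World!"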
| 626
| 1
|
'''simple docstring'''
def set_bit(number: int, position: int) -> int:
    return number | (1 << position)
def clear_bit(number: int, position: int) -> int:
    return number & ~(1 << position)
def flip_bit(number: int, position: int) -> int:
    return number ^ (1 << position)
def is_bit_set(number: int, position: int) -> bool:
    return ((number >> position) & 1) == 1
def get_bit(number: int, position: int) -> int:
    return int((number & (1 << position)) != 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
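    # Illustrative values, with number = 0b1101 (13):
    #   set_bit(13, 1)    -> 15 (0b1111)
    #   clear_bit(13, 2)  -> 9  (0b1001)
    #   flip_bit(13, 1)   -> 15 (0b1111)
    #   is_bit_set(13, 3) -> True
    #   get_bit(13, 1)    -> 0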
| 50
|
'''simple docstring'''
def reverse_long_words(sentence: str) -> str:
    """Reverse every word that is longer than 4 characters in a sentence."""
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words('''Hey wollef sroirraw'''))
| 138
| 0
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_A : Optional[Any] = logging.get_logger(__name__)
class ConvNextImageProcessor(BaseImageProcessor):
    r"""Constructs a ConvNeXT image processor."""
    model_input_names = ["pixel_values"]
    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)
        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: float,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size["shortest_edge"]
        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs
            )
    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
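# Hypothetical usage sketch (checkpoint name is illustrative):
#
#     from transformers import AutoImageProcessor
#
#     image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224")
#     inputs = image_processor(images=image, return_tensors="pt")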
| 715
|
'''simple docstring'''
def solution(n: int = 1_000) -> int:
    """Return the largest product a*b*c among Pythagorean triplets with a + b + c = n."""
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product
if __name__ == "__main__":
print(f'{solution() = }')
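# Derivation of the loop's formula: from a + b + c = n we get c = n - a - b;
# substituting into a**2 + b**2 = c**2 and solving for b gives
# b = n * (n - 2a) / (2 * (n - a)), which the code evaluates with integer
# division and then verifies exactly via c * c == a * a + b * b.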
| 330
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_jukebox': [
'JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP',
'JukeboxConfig',
'JukeboxPriorConfig',
'JukeboxVQVAEConfig',
],
'tokenization_jukebox': ['JukeboxTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_jukebox"] = [
'JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST',
'JukeboxModel',
'JukeboxPreTrainedModel',
'JukeboxVQVAE',
'JukeboxPrior',
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 278
|
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    """Return the Hubble parameter H(z) of an FLRW universe at the given redshift."""
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")
    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)
        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )
        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
    matter_density = 0.3
print(
hubble_parameter(
hubble_constant=6_8.3,
radiation_density=1e-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
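    # The expression inside hubble_parameter is the dimensionless Friedmann function
    #     E(z)^2 = Omega_r*(1+z)^4 + Omega_m*(1+z)^3 + Omega_k*(1+z)^2 + Omega_Lambda
    # so the function returns H(z) = H0 * sqrt(E(z)^2).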
| 278
| 1
|
'''simple docstring'''
import re
def is_sri_lankan_phone_number(phone: str) -> bool:
    """Determine whether the string is a valid Sri Lankan mobile phone number."""
    pattern = re.compile(
        r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$"
    )
    return bool(re.search(pattern, phone))
if __name__ == "__main__":
    phone = "0094702343221"
print(is_sri_lankan_phone_number(phone))
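# Pattern notes: the regex accepts the prefixes 0, 94, +94 or 0094, requires a
# mobile code 7x (x in 0-2 or 4-8), an optional "-" or " " separator, and exactly
# seven further digits; e.g. "0712345678" and "+94771234567" match, "0632345678" does not.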
| 98
|
'''simple docstring'''
import jax.numpy as jnp
from ...utils import logging
from ..t5.modeling_flax_t5 import FlaxT5EncoderModel, FlaxT5ForConditionalGeneration, FlaxT5Model
from .configuration_mt5 import MT5Config
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "T5Config"
def shift_tokens_right(input_ids: jnp.ndarray, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray:
    """Shift input ids one token to the right, prepending the decoder start token."""
    shifted_input_ids = jnp.zeros_like(input_ids)
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)
    # replace possible -100 values in labels by `pad_token_id`
    shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
    return shifted_input_ids
class FlaxMT5Model(FlaxT5Model):
    model_type = "mt5"
    config_class = MT5Config
class FlaxMT5EncoderModel(FlaxT5EncoderModel):
    model_type = "mt5"
    config_class = MT5Config
class FlaxMT5ForConditionalGeneration(FlaxT5ForConditionalGeneration):
    model_type = "mt5"
    config_class = MT5Config
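# Hypothetical usage sketch (checkpoint name is illustrative):
#
#     model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
#     decoder_input_ids = shift_tokens_right(
#         labels, model.config.pad_token_id, model.config.decoder_start_token_id
#     )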
| 98
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"""configuration_encodec""": [
"""ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""EncodecConfig""",
],
"""feature_extraction_encodec""": ["""EncodecFeatureExtractor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encodec"] = [
"""ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""EncodecModel""",
"""EncodecPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 166
|
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimension has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is (as it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
nan = float("nan")
class Tee:
    """A helper class to tee print's output into a file. Usage: sys.stdout = Tee(filename)."""
    def __init__(self, filename):
        self.stdout = sys.stdout
        self.file = open(filename, "a")
    def __getattr__(self, attr):
        return getattr(self.stdout, attr)
    def write(self, msg):
        self.stdout.write(msg)
        # strip tqdm codes
        self.file.write(re.sub(r"^.*\r", "", msg, 0, re.M))
def get_original_command(max_width=80, full_python_path=False):
    cmd = []
    # deal with critical env vars
    env_keys = ["CUDA_VISIBLE_DEVICES"]
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(f"{key}={val}")
    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("/")[-1]
    cmd.append(python)
    # now the normal args
    cmd += list(map(shlex.quote, sys.argv))
    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ""
    while len(cmd) > 0:
        current_line += f"{cmd.pop(0)} "
        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ""
    return "\\\n".join(lines)
def get_base_command(args, output_dir):
    # unwrap multi-line input
    args.base_cmd = re.sub(r"[\\\n]+", " ", args.base_cmd)
    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r"--output_dir\s+[^\s]+", "", args.base_cmd)
    args.base_cmd += f" --output_dir {output_dir}"
    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r"--overwrite_output_dir\s+", "", args.base_cmd)
    args.base_cmd += " --overwrite_output_dir"
    return [sys.executable] + shlex.split(args.base_cmd)
def process_run_single(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose):
    # Enable to debug everything but the run itself, to do it fast and see the progress.
    # This is useful for debugging the output formatting quickly - we can remove it later once
    # everybody is happy with the output
    if 0:
        import random
        from time import sleep
        sleep(0)
        return dict(
            {k: random.uniform(0, 100) for k in metric_keys},
            **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222])},
        )
    result = subprocess.run(cmd, capture_output=True, text=True)
    if verbose:
        print("STDOUT", result.stdout)
        print("STDERR", result.stderr)
    # save the streams
    prefix = variation.replace(" ", "-")
    with open(Path(output_dir) / f"log.{prefix}.stdout.txt", "w") as f:
        f.write(result.stdout)
    with open(Path(output_dir) / f"log.{prefix}.stderr.txt", "w") as f:
        f.write(result.stderr)
    if result.returncode != 0:
        if verbose:
            print("failed")
        return {target_metric_key: nan}
    with io.open(f"{output_dir}/all_results.json", "r", encoding="utf-8") as f:
        metrics = json.load(f)
    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}
def process_run(
    id,
    cmd,
    variation_key,
    variation,
    longest_variation_len,
    target_metric_key,
    report_metric_keys,
    repeat_times,
    output_dir,
    verbose,
):
    results = []
    metrics = []
    preamble = f"{id}: {variation:<{longest_variation_len}}"
    outcome = f"{preamble}: "
    metric_keys = set(report_metric_keys + [target_metric_key])
    for i in tqdm(range(repeat_times), desc=preamble, leave=False):
        single_run_metrics = process_run_single(
            id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose
        )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result):
            metrics.append(single_run_metrics)
            results.append(result)
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = f"\33[2K\r{outcome}"
    if len(metrics) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key], 2)
        results_str = f"{outcome} {mean_target}"
        if len(metrics) > 1:
            results_str += f" {tuple(round(x, 2) for x in results)}"
        print(results_str)
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome)
        return {variation_key: variation, target_metric_key: nan}
def get_versions():
    properties = torch.cuda.get_device_properties(torch.device("cuda"))
    return f"\nDatetime    : {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n\nSoftware:\ntransformers: {transformers.__version__}\ntorch       : {torch.__version__}\ncuda        : {torch.version.cuda}\npython      : {platform.python_version()}\n\nHardware:\n{torch.cuda.device_count()} GPUs      : {properties.name}, {properties.total_memory/2**30:0.2f}GB\n"
def process_results(results, target_metric_key, report_metric_keys, base_variation, output_dir):
    df = pd.DataFrame(results)
    variation_key = "variation"
    diff_key = "diff_%"
    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation]):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()
    # create diff column if possible
    if not math.isnan(sentinel_value):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value)
            if not math.isnan(r[target_metric_key])
            else 0,
            axis="columns",
        )
    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols, axis="columns")  # reorder cols
    # capitalize
    df = df.rename(str.capitalize, axis="columns")
    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace("_", "<br>"), axis="columns")
    df_console = df.rename(lambda c: c.replace("_", "\n"), axis="columns")
    report = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False, floatfmt=".2f")]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False, floatfmt=".2f")]
    print("\n\n".join(report))
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base-cmd", default=None, type=str, required=True, help="Base cmd",
    )
    parser.add_argument(
        "--variations", default=None, type=str, nargs="+", required=True,
        help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'",
    )
    parser.add_argument(
        "--base-variation", default=None, type=str,
        help="Baseline variation to compare to. if None the minimal target value will be used to compare against",
    )
    parser.add_argument(
        "--target-metric-key", default=None, type=str, required=True,
        help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second",
    )
    parser.add_argument(
        "--report-metric-keys", default="", type=str,
        help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples",
    )
    parser.add_argument(
        "--repeat-times", default=1, type=int,
        help="How many times to re-run each variation - an average will be reported",
    )
    parser.add_argument(
        "--output_dir", default="output_benchmark", type=str,
        help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked",
    )
    parser.add_argument(
        "--verbose", default=False, action="store_true",
        help="Whether to show the outputs of each run or just the benchmark progress",
    )
    args = parser.parse_args()
    output_dir = args.output_dir
    Path(output_dir).mkdir(exist_ok=True)
    base_cmd = get_base_command(args, output_dir)
    # split each dimension into its --foo variations
    dims = [list(map(str.strip, re.split(r"\|", x))) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip, map(" ".join, itertools.product(*dims))))
    longest_variation_len = max(len(x) for x in variations)
    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()
    # capture prints into a log file for convenience
    report_fn = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.txt"
    print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt")
    print(f"and this script's output is also piped into {report_fn}")
    sys.stdout = Tee(report_fn)
    print(f"\n*** Running {len(variations)} benchmarks:")
    print(f"Base command: {' '.join(base_cmd)}")
    variation_key = "variation"
    results = []
    for id, variation in enumerate(tqdm(variations, desc="Total completion: ", leave=False)):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1,
                cmd,
                variation_key,
                variation,
                longest_variation_len,
                args.target_metric_key,
                report_metric_keys,
                args.repeat_times,
                output_dir,
                args.verbose,
            )
        )
    process_results(results, args.target_metric_key, report_metric_keys, args.base_variation, output_dir)
if __name__ == "__main__":
main()
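# A standalone sketch of the variation expansion performed in main() above:
# each '|'-separated dimension is split, and a cartesian product of the
# dimensions yields one command-line suffix per benchmark run (the input
# dimensions below are hypothetical).
import itertools
import re

demo_dims = [list(map(str.strip, re.split(r"\|", d))) for d in ["|--fp16|--bf16", "|--tf32"]]
demo_variations = list(map(str.strip, map(" ".join, itertools.product(*demo_dims))))
print(demo_variations)  # ['', '--tf32', '--fp16', '--fp16 --tf32', '--bf16', '--bf16 --tf32']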
| 166
| 1
|
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class TokenizedDataset( IterableDataset ):
"""simple docstring"""
def __init__( self , tokenizer , dataset , n_tasks=None , n_copies=1 ) -> Dict:
self.tokenizer = tokenizer
self.dataset = dataset
self.n_tasks = len(dataset ) if n_tasks is None else n_tasks
self.n_copies = n_copies
def __iter__( self ) -> Tuple:
prompts = []
for task in range(self.n_tasks ):
# without strip, the model generates commented code ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]['''prompt'''].strip() )
outputs = self.tokenizer(prompts , padding=True , return_tensors='''pt''' )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class EndOfFunctionCriteria( StoppingCriteria ):
"""simple docstring"""
def __init__( self , start_length , eof_strings , tokenizer ) -> Dict:
self.start_length = start_length
self.eof_strings = eof_strings
self.tokenizer = tokenizer
def __call__( self , input_ids , scores , **kwargs ) -> List[str]:
decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
done = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(done )
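# A standalone restatement of the stopping rule implemented by the criteria
# class above: generation is considered done once every decoded continuation
# contains at least one of the stop strings (the sample strings are hypothetical).
def _all_done(decoded_generations, eof_strings):
    return all(any(s in gen for s in eof_strings) for gen in decoded_generations)

print(_all_done(["    return a + b\ndef ", "    return a"], EOF_STRINGS))  # False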
def remove_last_block(string):
string_list = re.split('''(%s)''' % '''|'''.join(EOF_STRINGS) , string )
# last string should be ""
return "".join(string_list[:-2] )
def complete_code(accelerator , model , tokenizer , dataloader , n_tasks , batch_size=20 , **gen_kwargs ):
A : List[Any] = defaultdict(lowerCamelCase_ ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(lowerCamelCase_ ) ):
with torch.no_grad():
A : int = batch['''ids'''].shape[-1]
A : Tuple = accelerator.unwrap_model(lowerCamelCase_ ).generate(
input_ids=batch['''ids'''][:, : batch['''input_len''']] , num_return_sequences=lowerCamelCase_ , **lowerCamelCase_ )
# each task is generated batch_size times
A : Optional[int] = batch['''task_id'''].repeat(lowerCamelCase_ )
A : Optional[Any] = accelerator.pad_across_processes(
lowerCamelCase_ , dim=1 , pad_index=tokenizer.pad_token_id )
A , A : Tuple = accelerator.gather((generated_tokens, generated_tasks) )
A : Dict = generated_tokens.cpu().numpy()
A : Any = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(lowerCamelCase_ , lowerCamelCase_ ):
gen_token_dict[task].append(lowerCamelCase_ )
A : Any = [[] for _ in range(lowerCamelCase_ )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
A : List[str] = tokenizer.decode(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ , clean_up_tokenization_spaces=lowerCamelCase_ )
code_gens[task].append(remove_last_block(lowerCamelCase_ ) )
return code_gens
def main():
# Setup configuration
A : int = HfArgumentParser(lowerCamelCase_ )
A : str = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
os.environ['''HF_ALLOW_CODE_EVAL'''] = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
os.environ['''TOKENIZERS_PARALLELISM'''] = '''false'''
if args.num_workers is None:
A : str = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
A : List[Any] = Accelerator()
set_seed(args.seed , device_specific=lowerCamelCase_ )
# Load model and tokenizer
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt )
tokenizer.pad_token = tokenizer.eos_token
A : Tuple = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
A : Any = {
'''do_sample''': args.do_sample,
'''temperature''': args.temperature,
'''max_new_tokens''': args.max_new_tokens,
'''top_p''': args.top_p,
'''top_k''': args.top_k,
'''stopping_criteria''': StoppingCriteriaList([EndOfFunctionCriteria(0 , lowerCamelCase_ , lowerCamelCase_ )] ),
}
# Load evaluation dataset and metric
human_eval = load_dataset('''openai_humaneval''' )
code_eval_metric = load_metric('''code_eval''' )
A : Optional[int] = args.num_tasks if args.num_tasks is not None else len(human_eval['''test'''] )
A : str = args.n_samples // args.batch_size
A : Any = TokenizedDataset(lowerCamelCase_ , human_eval['''test'''] , n_copies=lowerCamelCase_ , n_tasks=lowerCamelCase_ )
# do not confuse args.batch_size, which is actually the num_return_sequences
A : str = DataLoader(lowerCamelCase_ , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
A : Union[str, Any] = code_eval_metric.compute(references=[''''''] , predictions=[['''''']] )
except ValueError as exception:
print(
'''Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'''
''' flag to enable code evaluation.''' )
raise exception
A , A : Any = accelerator.prepare(lowerCamelCase_ , lowerCamelCase_ )
A : str = complete_code(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , n_tasks=lowerCamelCase_ , batch_size=args.batch_size , **lowerCamelCase_ , )
if accelerator.is_main_process:
A : List[str] = []
for task in tqdm(range(lowerCamelCase_ ) ):
A : str = human_eval['''test'''][task]['''test''']
A : Tuple = F"check({human_eval['test'][task]['entry_point']})"
references.append('''\n''' + test_func + '''\n''' + entry_point )
# Evaluate completions with "code_eval" metric
A , A : Any = code_eval_metric.compute(
references=lowerCamelCase_ , predictions=lowerCamelCase_ , num_workers=args.num_workers )
print(F'Results: {pass_at_k}' )
# Save results to json file
with open(args.output_file , '''w''' ) as fp:
json.dump(lowerCamelCase_ , lowerCamelCase_ )
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 423
|
lowercase : Tuple = {
"Pillow": "Pillow",
"accelerate": "accelerate>=0.11.0",
"compel": "compel==0.1.8",
"black": "black~=23.1",
"datasets": "datasets",
"filelock": "filelock",
"flax": "flax>=0.4.1",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.13.2",
"requests-mock": "requests-mock==1.10.0",
"importlib_metadata": "importlib_metadata",
"invisible-watermark": "invisible-watermark",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2",
"jaxlib": "jaxlib>=0.1.65",
"Jinja2": "Jinja2",
"k-diffusion": "k-diffusion>=0.0.12",
"torchsde": "torchsde",
"note_seq": "note_seq",
"librosa": "librosa",
"numpy": "numpy",
"omegaconf": "omegaconf",
"parameterized": "parameterized",
"protobuf": "protobuf>=3.20.3,<4",
"pytest": "pytest",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"ruff": "ruff>=0.0.241",
"safetensors": "safetensors",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"scipy": "scipy",
"onnx": "onnx",
"regex": "regex!=2019.12.17",
"requests": "requests",
"tensorboard": "tensorboard",
"torch": "torch>=1.4",
"torchvision": "torchvision",
"transformers": "transformers>=4.25.1",
"urllib3": "urllib3<=2.0.0",
}
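# A small sketch (hypothetical helper name) of how a pin table like the one
# above is typically consumed: look up package names and collect their pinned
# specifiers, e.g. when assembling extras for setup().
def _deps_list(deps, *pkgs):
    return [deps[p] for p in pkgs]

print(_deps_list({"torch": "torch>=1.4", "numpy": "numpy"}, "torch", "numpy"))  # ['torch>=1.4', 'numpy']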
| 423
| 1
|
import numpy as np
from cv2 import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r"""digital_image_processing/image_data/lena_small.jpg""")
gray = cvtColor(img, COLOR_BGR2GRAY)
def __magic_name__ ( ) -> Optional[int]:
"""simple docstring"""
lowercase_ : Optional[Any] = cn.convert_to_negative(lowercase )
# assert negative_img array for at least one True
assert negative_img.any()
def __magic_name__ ( ) -> int:
"""simple docstring"""
with Image.open("""digital_image_processing/image_data/lena_small.jpg""" ) as img:
# Work around assertion for response
assert str(cc.change_contrast(lowercase , 110 ) ).startswith(
"""<PIL.Image.Image image mode=RGB size=100x100 at""" )
def __magic_name__ ( ) -> Optional[Any]:
"""simple docstring"""
lowercase_ : Union[str, Any] = canny.gen_gaussian_kernel(9 , sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def __magic_name__ ( ) -> List[str]:
"""simple docstring"""
lowercase_ : Optional[int] = imread("""digital_image_processing/image_data/lena_small.jpg""" , 0 )
# assert ambiguous array for all == True
assert canny_img.all()
lowercase_ : Any = canny.canny(lowercase )
# assert canny array for at least one True
assert canny_array.any()
def __magic_name__ ( ) -> Any:
"""simple docstring"""
assert gg.gaussian_filter(lowercase , 5 , sigma=0.9 ).all()
def __magic_name__ ( ) -> Any:
"""simple docstring"""
lowercase_ : Union[str, Any] = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
lowercase_ : Tuple = conv.img_convolve(lowercase , lowercase ).astype(lowercase )
assert res.any()
def __magic_name__ ( ) -> List[Any]:
"""simple docstring"""
assert med.median_filter(lowercase , 3 ).any()
def __magic_name__ ( ) -> str:
"""simple docstring"""
lowercase_ , lowercase_ : Optional[Any] = sob.sobel_filter(lowercase )
assert grad.any() and theta.any()
def __magic_name__ ( ) -> Tuple:
"""simple docstring"""
lowercase_ : Optional[int] = sp.make_sepia(lowercase , 20 )
assert sepia.all()
def __magic_name__ ( lowercase = "digital_image_processing/image_data/lena_small.jpg" ) -> str:
"""simple docstring"""
lowercase_ : int = bs.Burkes(imread(lowercase , 1 ) , 120 )
burkes.process()
assert burkes.output_img.any()
def __magic_name__ ( lowercase = "digital_image_processing/image_data/lena_small.jpg" , ) -> List[Any]:
"""simple docstring"""
lowercase_ : int = rs.NearestNeighbour(imread(lowercase , 1 ) , 400 , 200 )
nn.process()
assert nn.output.any()
def __magic_name__ ( ) -> Union[str, Any]:
"""simple docstring"""
lowercase_ : Tuple = """digital_image_processing/image_data/lena.jpg"""
# Reading the image and converting it to grayscale.
lowercase_ : Optional[Any] = imread(lowercase , 0 )
# Test for get_neighbors_pixel function() return not None
lowercase_ : List[str] = 0
lowercase_ : List[str] = 0
lowercase_ : List[str] = image[x_coordinate][y_coordinate]
lowercase_ : Union[str, Any] = lbp.get_neighbors_pixel(
lowercase , lowercase , lowercase , lowercase )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
lowercase_ : Union[str, Any] = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0] ):
for j in range(0 , image.shape[1] ):
lowercase_ : int = lbp.local_binary_value(lowercase , lowercase , lowercase )
assert lbp_image.any()
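# A simplified, self-contained restatement of the local binary pattern idea
# exercised above (hypothetical helper, not the library implementation):
# compare the 8 neighbors to the center pixel and pack the results into a byte.
def _lbp_value(center, neighbors):
    return sum(1 << i for i, n in enumerate(neighbors) if n >= center)

print(_lbp_value(5, [1, 9, 5, 0, 7, 2, 6, 3]))  # 86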
| 458
|
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DiTPipeline( DiffusionPipeline ):
def __init__( self :Dict , _lowercase :Transformer2DModel , _lowercase :AutoencoderKL , _lowercase :KarrasDiffusionSchedulers , id2label :Optional[Dict[int, str]] = None , ):
'''simple docstring'''
super().__init__()
self.register_modules(transformer=_lowercase , vae=_lowercase , scheduler=_lowercase )
# create an imagenet label -> id dictionary for easier use
self.labels = {}
if id2label is not None:
for key, value in id2label.items():
for label in value.split("," ):
self.labels[label.lstrip()] = int(key)
self.labels = dict(sorted(self.labels.items() ) )
def UpperCAmelCase ( self :Optional[int] , _lowercase :Union[str, List[str]] ):
'''simple docstring'''
if not isinstance(_lowercase , _lowercase ):
lowercase__ = list(_lowercase )
for l in label:
if l not in self.labels:
raise ValueError(
f'''{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.''' )
return [self.labels[l] for l in label]
@torch.no_grad()
def __call__( self :Optional[Any] , _lowercase :List[int] , _lowercase :float = 4.0 , _lowercase :Optional[Union[torch.Generator, List[torch.Generator]]] = None , _lowercase :int = 50 , _lowercase :Optional[str] = "pil" , _lowercase :bool = True , ):
'''simple docstring'''
lowercase__ = len(_lowercase )
lowercase__ = self.transformer.config.sample_size
lowercase__ = self.transformer.config.in_channels
lowercase__ = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=_lowercase , device=self.device , dtype=self.transformer.dtype , )
lowercase__ = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents
lowercase__ = torch.tensor(_lowercase , device=self.device ).reshape(-1 )
lowercase__ = torch.tensor([10_00] * batch_size , device=self.device )
lowercase__ = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(_lowercase )
for t in self.progress_bar(self.scheduler.timesteps ):
if guidance_scale > 1:
lowercase__ = latent_model_input[: len(_lowercase ) // 2]
lowercase__ = torch.cat([half, half] , dim=0 )
lowercase__ = self.scheduler.scale_model_input(_lowercase , _lowercase )
lowercase__ = t
if not torch.is_tensor(_lowercase ):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
lowercase__ = latent_model_input.device.type == "mps"
if isinstance(_lowercase , _lowercase ):
lowercase__ = torch.float32 if is_mps else torch.float64
else:
lowercase__ = torch.int32 if is_mps else torch.int64
lowercase__ = torch.tensor([timesteps] , dtype=_lowercase , device=latent_model_input.device )
elif len(timesteps.shape ) == 0:
lowercase__ = timesteps[None].to(latent_model_input.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
lowercase__ = timesteps.expand(latent_model_input.shape[0] )
# predict noise model_output
lowercase__ = self.transformer(
_lowercase , timestep=_lowercase , class_labels=_lowercase ).sample
# perform guidance
if guidance_scale > 1:
lowercase__ , lowercase__ = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
lowercase__ , lowercase__ = torch.split(_lowercase , len(_lowercase ) // 2 , dim=0 )
lowercase__ = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
lowercase__ = torch.cat([half_eps, half_eps] , dim=0 )
lowercase__ = torch.cat([eps, rest] , dim=1 )
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
lowercase__ , lowercase__ = torch.split(_lowercase , _lowercase , dim=1 )
else:
lowercase__ = noise_pred
# compute previous image: x_t -> x_t-1
lowercase__ = self.scheduler.step(_lowercase , _lowercase , _lowercase ).prev_sample
if guidance_scale > 1:
lowercase__ , lowercase__ = latent_model_input.chunk(2 , dim=0 )
else:
lowercase__ = latent_model_input
lowercase__ = 1 / self.vae.config.scaling_factor * latents
lowercase__ = self.vae.decode(_lowercase ).sample
lowercase__ = (samples / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowercase__ = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowercase__ = self.numpy_to_pil(_lowercase )
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=_lowercase )
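# The classifier-free guidance update from the denoising loop above, isolated
# on toy tensors: eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps).
import torch

_uncond_eps = torch.zeros(1, 4)
_cond_eps = torch.ones(1, 4)
print(_uncond_eps + 4.0 * (_cond_eps - _uncond_eps))  # tensor([[4., 4., 4., 4.]])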
| 655
| 0
|
def factorial(num):
"""Compute num! iteratively."""
fact = 1
for i in range(1 , num + 1 ):
fact *= i
return fact
def split_and_add(number):
"""Return the sum of the decimal digits of number."""
sum_of_digits = 0
while number > 0:
last_digit = number % 10
sum_of_digits += last_digit
number = number // 10 # Removing the last_digit from the given number
return sum_of_digits
def solution(num = 1_00):
"""Return the sum of the digits of num!."""
fact = factorial(num)
return split_and_add(fact)
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
| 719
|
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def UpperCamelCase_ ( lowerCAmelCase__ ):
"""simple docstring"""
if "model" in orig_key:
_lowerCAmelCase : Any = orig_key.replace("model." , "" )
if "norm1" in orig_key:
_lowerCAmelCase : Dict = orig_key.replace("norm1" , "attention.output.LayerNorm" )
if "norm2" in orig_key:
_lowerCAmelCase : List[Any] = orig_key.replace("norm2" , "output.LayerNorm" )
if "norm" in orig_key:
_lowerCAmelCase : List[str] = orig_key.replace("norm" , "LayerNorm" )
if "transformer" in orig_key:
_lowerCAmelCase : Union[str, Any] = orig_key.split("." )[0].split("_" )[-1]
_lowerCAmelCase : Union[str, Any] = orig_key.replace(f"""transformer_{layer_num}""" , f"""encoder.layer.{layer_num}""" )
if "mha.attn" in orig_key:
_lowerCAmelCase : Union[str, Any] = orig_key.replace("mha.attn" , "attention.self" )
if "mha" in orig_key:
_lowerCAmelCase : Any = orig_key.replace("mha" , "attention" )
if "W_q" in orig_key:
_lowerCAmelCase : int = orig_key.replace("W_q" , "self.query" )
if "W_k" in orig_key:
_lowerCAmelCase : Optional[Any] = orig_key.replace("W_k" , "self.key" )
if "W_v" in orig_key:
_lowerCAmelCase : Dict = orig_key.replace("W_v" , "self.value" )
if "ff1" in orig_key:
_lowerCAmelCase : Tuple = orig_key.replace("ff1" , "intermediate.dense" )
if "ff2" in orig_key:
_lowerCAmelCase : str = orig_key.replace("ff2" , "output.dense" )
if "ff" in orig_key:
_lowerCAmelCase : Dict = orig_key.replace("ff" , "output.dense" )
if "mlm_class" in orig_key:
_lowerCAmelCase : Union[str, Any] = orig_key.replace("mlm.mlm_class" , "cls.predictions.decoder" )
if "mlm" in orig_key:
_lowerCAmelCase : Union[str, Any] = orig_key.replace("mlm" , "cls.predictions.transform" )
if "cls" not in orig_key:
_lowerCAmelCase : Tuple = "yoso." + orig_key
return orig_key
def UpperCamelCase_ ( lowerCAmelCase__ , lowerCAmelCase__ ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
_lowerCAmelCase : List[Any] = orig_state_dict.pop(lowerCAmelCase__ )
if ("pooler" in key) or ("sen_class" in key):
continue
else:
_lowerCAmelCase : int = val
_lowerCAmelCase : Dict = orig_state_dict["cls.predictions.decoder.bias"]
_lowerCAmelCase : Dict = torch.arange(lowerCAmelCase__ ).expand((1, -1) ) + 2
return orig_state_dict
def UpperCamelCase_ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
"""simple docstring"""
_lowerCAmelCase : Tuple = torch.load(lowerCAmelCase__ , map_location="cpu" )["model_state_dict"]
_lowerCAmelCase : List[Any] = YosoConfig.from_json_file(lowerCAmelCase__ )
_lowerCAmelCase : Optional[Any] = YosoForMaskedLM(lowerCAmelCase__ )
_lowerCAmelCase : Dict = convert_checkpoint_helper(config.max_position_embeddings , lowerCAmelCase__ )
print(model.load_state_dict(lowerCAmelCase__ ) )
model.eval()
model.save_pretrained(lowerCAmelCase__ )
print(f"""Checkpoint successfuly converted. Model saved at {pytorch_dump_path}""" )
if __name__ == "__main__":
snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--pytorch_model_path", default=None, type=str, required=True, help="Path to YOSO pytorch checkpoint."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The json file for YOSO model config.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
snake_case = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
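# The rename cascade in convert_checkpoint_helper above, traced on one
# hypothetical fairseq-style key:
_key = "model.transformer_3.mha.W_q.weight".replace("model.", "")
_layer = _key.split(".")[0].split("_")[-1]  # "3"
_key = _key.replace(f"transformer_{_layer}", f"encoder.layer.{_layer}")
_key = _key.replace("mha", "attention").replace("W_q", "self.query")
print("yoso." + _key)  # yoso.encoder.layer.3.attention.self.query.weight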
| 587
| 0
|
'''simple docstring'''
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
lowerCAmelCase : Optional[int] = logging.getLogger(__name__)
lowerCAmelCase : List[str] = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
lowerCAmelCase : Dict = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SCREAMING_SNAKE_CASE__ :
lowerCAmelCase_ = field(
default=UpperCAmelCase__ , metadata={
"""help""": (
"""The model checkpoint for weights initialization. Leave None if you want to train a model from"""
""" scratch."""
)
} , )
lowerCAmelCase_ = field(
default=UpperCAmelCase__ , metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(UpperCAmelCase__)} , )
lowerCAmelCase_ = field(
default=UpperCAmelCase__ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""})
lowerCAmelCase_ = field(
default=UpperCAmelCase__ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""})
lowerCAmelCase_ = field(
default=UpperCAmelCase__ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class SCREAMING_SNAKE_CASE__ :
lowerCAmelCase_ = field(
default=UpperCAmelCase__ , metadata={"""help""": """The input training data file (a text file)."""})
lowerCAmelCase_ = field(
default=UpperCAmelCase__ , metadata={
"""help""": (
"""The input training data files (multiple files in glob format). """
"""Very often splitting large files to smaller files can prevent tokenizer going out of memory"""
)
} , )
lowerCAmelCase_ = field(
default=UpperCAmelCase__ , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , )
lowerCAmelCase_ = field(
default=UpperCAmelCase__ , metadata={"""help""": """An optional input train ref data file for whole word mask in Chinese."""} , )
lowerCAmelCase_ = field(
default=UpperCAmelCase__ , metadata={"""help""": """An optional input eval ref data file for whole word mask in Chinese."""} , )
lowerCAmelCase_ = field(
default=UpperCAmelCase__ , metadata={"""help""": """Whether distinct lines of text in the dataset are to be handled as distinct sequences."""} , )
lowerCAmelCase_ = field(
default=UpperCAmelCase__ , metadata={"""help""": """Train with masked-language modeling loss instead of language modeling."""})
lowerCAmelCase_ = field(default=UpperCAmelCase__ , metadata={"""help""": """Whether ot not to use whole word mask."""})
lowerCAmelCase_ = field(
default=0.1_5 , metadata={"""help""": """Ratio of tokens to mask for masked language modeling loss"""})
lowerCAmelCase_ = field(
default=1 / 6 , metadata={
"""help""": (
"""Ratio of length of a span of masked tokens to surrounding context length for permutation language"""
""" modeling."""
)
} , )
lowerCAmelCase_ = field(
default=5 , metadata={"""help""": """Maximum length of a span of masked tokens for permutation language modeling."""})
lowerCAmelCase_ = field(
default=-1 , metadata={
"""help""": (
"""Optional input sequence length after tokenization."""
"""The training dataset will be truncated in block of this size for training."""
"""Default to the model max input length for single sentence inputs (take into account special tokens)."""
)
} , )
lowerCAmelCase_ = field(
default=UpperCAmelCase__ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""})
def get_dataset(args , tokenizer , evaluate = False , cache_dir = None , ):
def _dataset(file_path , ref_path=None):
if args.line_by_line:
if ref_path is not None:
if not args.whole_word_mask or not args.mlm:
raise ValueError('You need to set whole word masking and mlm to True for Chinese Whole Word Mask')
return LineByLineWithRefDataset(
tokenizer=tokenizer , file_path=file_path , block_size=args.block_size , ref_path=ref_path , )
return LineByLineTextDataset(tokenizer=tokenizer , file_path=file_path , block_size=args.block_size)
else:
return TextDataset(
tokenizer=tokenizer , file_path=file_path , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=cache_dir , )
if evaluate:
return _dataset(args.eval_data_file , args.eval_ref_file)
elif args.train_data_files:
return ConcatDataset([_dataset(f) for f in glob(args.train_data_files)])
else:
return _dataset(args.train_data_file , args.train_ref_file)
def A_( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
model_args , data_args , training_args = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
'Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file '
'or remove the --do_eval argument.')
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
' --overwrite_output_dir to overcome.')
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1) , training_args.fp16 , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , snake_case__)
# Set seed
set_seed(training_args.seed)
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
UpperCamelCase = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir)
elif model_args.model_name_or_path:
UpperCamelCase = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir)
else:
UpperCamelCase = CONFIG_MAPPING[model_args.model_type]()
logger.warning('You are instantiating a new config instance from scratch.')
if model_args.tokenizer_name:
UpperCamelCase = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir)
elif model_args.model_name_or_path:
UpperCamelCase = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir)
else:
raise ValueError(
'You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another'
' script, save it, and load it from here, using --tokenizer_name')
if model_args.model_name_or_path:
UpperCamelCase = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path) , config=snake_case__ , cache_dir=model_args.cache_dir , )
else:
logger.info('Training new model from scratch')
UpperCamelCase = AutoModelWithLMHead.from_config(snake_case__)
model.resize_token_embeddings(len(snake_case__))
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
'BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the'
'--mlm flag (masked language modeling).')
if data_args.block_size <= 0:
UpperCamelCase = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
UpperCamelCase = min(data_args.block_size , tokenizer.max_len)
# Get datasets
UpperCamelCase = (
get_dataset(snake_case__ , tokenizer=snake_case__ , cache_dir=model_args.cache_dir) if training_args.do_train else None
)
UpperCamelCase = (
get_dataset(snake_case__ , tokenizer=snake_case__ , evaluate=snake_case__ , cache_dir=model_args.cache_dir)
if training_args.do_eval
else None
)
if config.model_type == "xlnet":
UpperCamelCase = DataCollatorForPermutationLanguageModeling(
tokenizer=snake_case__ , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
else:
if data_args.mlm and data_args.whole_word_mask:
UpperCamelCase = DataCollatorForWholeWordMask(
tokenizer=snake_case__ , mlm_probability=data_args.mlm_probability)
else:
UpperCamelCase = DataCollatorForLanguageModeling(
tokenizer=snake_case__ , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability)
# Initialize our Trainer
UpperCamelCase = Trainer(
model=snake_case__ , args=snake_case__ , data_collator=snake_case__ , train_dataset=snake_case__ , eval_dataset=snake_case__ , prediction_loss_only=snake_case__ , )
# Training
if training_args.do_train:
UpperCamelCase = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path)
else None
)
trainer.train(model_path=snake_case__)
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir)
# Evaluation
UpperCamelCase = {}
if training_args.do_eval:
logger.info('*** Evaluate ***')
UpperCamelCase = trainer.evaluate()
UpperCamelCase = math.exp(eval_output['eval_loss'])
UpperCamelCase = {'perplexity': perplexity}
UpperCamelCase = os.path.join(training_args.output_dir , 'eval_results_lm.txt')
if trainer.is_world_master():
with open(snake_case__ , 'w') as writer:
logger.info('***** Eval results *****')
for key in sorted(result.keys()):
logger.info(' %s = %s' , snake_case__ , str(result[key]))
writer.write('%s = %s\n' % (key, str(result[key])))
results.update(snake_case__)
return results
def A_( A : List[str]):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
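# Perplexity as reported at the end of main() above: the exponential of the
# mean evaluation loss (the loss value here is hypothetical).
import math

print(round(math.exp(2.3026), 2))  # 10.0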
| 3
|
"""simple docstring"""
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class __magic_name__ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = [R"h\.\d+\.attn\.bias", R"h\.\d+\.attn\.masked_bias"]
@register_to_config
def __init__( self , _a , _a , _a = None , _a = 50_257 , _a = 1_024 , _a = 768 , _a = 12 , _a = 12 , _a = None , _a = "gelu_new" , _a = 0.1 , _a = 0.1 , _a = 0.1 , _a = 1e-5 , _a = 0.02 , _a = True , _a = True , _a = False , _a = False , ):
"""simple docstring"""
super().__init__()
lowerCamelCase = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
f'`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and'
f' `n_embd`: {n_embd} are not equal.' )
lowerCamelCase = prefix_inner_dim
lowerCamelCase = prefix_hidden_dim
lowerCamelCase = (
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
lowerCamelCase = (
nn.Linear(self.prefix_hidden_dim , _a ) if self.prefix_hidden_dim is not None else nn.Identity()
)
lowerCamelCase = GPTaConfig(
vocab_size=_a , n_positions=_a , n_embd=_a , n_layer=_a , n_head=_a , n_inner=_a , activation_function=_a , resid_pdrop=_a , embd_pdrop=_a , attn_pdrop=_a , layer_norm_epsilon=_a , initializer_range=_a , scale_attn_weights=_a , use_cache=_a , scale_attn_by_inverse_layer_idx=_a , reorder_and_upcast_attn=_a , )
lowerCamelCase = GPTaLMHeadModel(_a )
def _lowerCAmelCase ( self , _a , _a , _a = None , _a = None , ):
"""simple docstring"""
lowerCamelCase = self.transformer.transformer.wte(_a )
lowerCamelCase = self.encode_prefix(_a )
lowerCamelCase = self.decode_prefix(_a )
lowerCamelCase = torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
lowerCamelCase = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
lowerCamelCase = torch.cat((dummy_token, input_ids) , dim=1 )
lowerCamelCase = self.transformer(inputs_embeds=_a , labels=_a , attention_mask=_a )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def _lowerCAmelCase ( self , _a , _a ):
"""simple docstring"""
return torch.zeros(_a , self.prefix_length , dtype=torch.int64 , device=_a )
def _lowerCAmelCase ( self , _a ):
"""simple docstring"""
return self.encode_prefix(_a )
@torch.no_grad()
def _lowerCAmelCase ( self , _a , _a , _a ):
"""simple docstring"""
lowerCamelCase = torch.split(_a , 1 , dim=0 )
lowerCamelCase = []
lowerCamelCase = []
for feature in features:
lowerCamelCase = self.decode_prefix(feature.to(_a ) ) # back to the clip feature
# Only support beam search for now
lowerCamelCase , lowerCamelCase = self.generate_beam(
input_embeds=_a , device=_a , eos_token_id=_a )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
lowerCamelCase = torch.stack(_a )
lowerCamelCase = torch.stack(_a )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def _lowerCAmelCase ( self , _a=None , _a=None , _a=None , _a = 5 , _a = 67 , _a = 1.0 , _a = None , ):
"""simple docstring"""
lowerCamelCase = eos_token_id
lowerCamelCase = None
lowerCamelCase = None
lowerCamelCase = torch.ones(_a , device=_a , dtype=torch.int )
lowerCamelCase = torch.zeros(_a , device=_a , dtype=torch.bool )
if input_embeds is not None:
lowerCamelCase = input_embeds
else:
lowerCamelCase = self.transformer.transformer.wte(_a )
for i in range(_a ):
lowerCamelCase = self.transformer(inputs_embeds=_a )
lowerCamelCase = outputs.logits
lowerCamelCase = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
lowerCamelCase = logits.softmax(-1 ).log()
if scores is None:
lowerCamelCase , lowerCamelCase = logits.topk(_a , -1 )
lowerCamelCase = generated.expand(_a , *generated.shape[1:] )
lowerCamelCase , lowerCamelCase = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
lowerCamelCase = next_tokens
else:
lowerCamelCase = tokens.expand(_a , *tokens.shape[1:] )
lowerCamelCase = torch.cat((tokens, next_tokens) , dim=1 )
else:
lowerCamelCase = -float(np.inf )
lowerCamelCase = 0
lowerCamelCase = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
lowerCamelCase = scores_sum / seq_lengths[:, None]
lowerCamelCase , lowerCamelCase = scores_sum_average.view(-1 ).topk(_a , -1 )
lowerCamelCase = next_tokens // scores_sum.shape[1]
lowerCamelCase = seq_lengths[next_tokens_source]
lowerCamelCase = next_tokens % scores_sum.shape[1]
lowerCamelCase = next_tokens.unsqueeze(1 )
lowerCamelCase = tokens[next_tokens_source]
lowerCamelCase = torch.cat((tokens, next_tokens) , dim=1 )
lowerCamelCase = generated[next_tokens_source]
lowerCamelCase = scores_sum_average * seq_lengths
lowerCamelCase = is_stopped[next_tokens_source]
lowerCamelCase = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
lowerCamelCase = torch.cat((generated, next_token_embed) , dim=1 )
lowerCamelCase = is_stopped + next_tokens.eq(_a ).squeeze()
if is_stopped.all():
break
lowerCamelCase = scores / seq_lengths
lowerCamelCase = scores.argsort(descending=_a )
# tokens tensors are already padded to max_seq_length
lowerCamelCase = [tokens[i] for i in order]
lowerCamelCase = torch.stack(_a , dim=0 )
lowerCamelCase = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths
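# The length normalization applied during beam search above, on toy numbers:
# summed log-probs are divided by sequence length before ranking candidates.
import torch

_scores_sum = torch.tensor([-2.0, -3.0, -2.5])
_seq_lengths = torch.tensor([2.0, 4.0, 5.0])
print((_scores_sum / _seq_lengths).topk(2).indices)  # tensor([2, 1])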
| 543
| 0
|
UpperCamelCase_ = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n'
UpperCamelCase_ = [{'type': 'code', 'content': INSTALL_CONTENT}]
UpperCamelCase_ = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 714
|
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase_ = logging.get_logger(__name__)
def _UpperCAmelCase ( A ):
'''simple docstring'''
UpperCAmelCase__ =torch.load(A , map_location="cpu" )
if "model" in sd.keys():
UpperCAmelCase__ =torch.load(A , map_location="cpu" )["model"]
# pop unnecessary weights
UpperCAmelCase__ =[
"decoder.version",
"decoder.output_projection.weight",
]
for key in keys_to_delete:
if key in sd:
sd.pop(A )
UpperCAmelCase__ ={
"decoder.project_in_dim.weight": "decoder.project_in.weight",
"decoder.project_out_dim.weight": "decoder.project_out.weight",
"decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
"decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
UpperCAmelCase__ =sd.pop(A )
UpperCAmelCase__ =list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
UpperCAmelCase__ =sd[key]
# We split QKV in separate Q,K,V
UpperCAmelCase__ =key.replace(".qkv_proj." , ".q_proj." )
UpperCAmelCase__ =key.replace(".qkv_proj." , ".k_proj." )
UpperCAmelCase__ =key.replace(".qkv_proj." , ".v_proj." )
UpperCAmelCase__ =value.shape[0]
assert depth % 3 == 0
# `SequenceParallelTransformerBlock` has its QKV weight separated as K, V, Q despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ =torch.split(A , depth // 3 , dim=0 )
UpperCAmelCase__ =q
UpperCAmelCase__ =k
UpperCAmelCase__ =v
del sd[key]
return sd
@torch.no_grad()
def _UpperCAmelCase ( A , A , A=None ):
'''simple docstring'''
UpperCAmelCase__ =load_checkpoint(A )
if config is not None:
UpperCAmelCase__ =OPTConfig.from_pretrained(A )
else:
UpperCAmelCase__ =OPTConfig()
UpperCAmelCase__ =OPTModel(A ).half().eval()
model.load_state_dict(A )
# Check results
Path(A ).mkdir(exist_ok=A )
model.save_pretrained(A )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fairseq_path',
type=str,
help=(
'path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'
' https://huggingface.co/models?other=opt_metasq'
),
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--hf_config', default=None, type=str, help='Define HF config.')
UpperCamelCase_ = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
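# The fused-QKV split performed above, on a toy tensor: a (3*d, d) projection
# weight is cut into three (d, d) blocks along dim 0.
import torch

_qkv = torch.arange(12.0).reshape(6, 2)  # d = 2
_q, _k, _v = torch.split(_qkv, _qkv.shape[0] // 3, dim=0)
print(_q.shape, _k.shape, _v.shape)  # three torch.Size([2, 2]) blocks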
| 510
| 0
|
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__)
# TODO Update this
__SCREAMING_SNAKE_CASE : Any = {
'facebook/esm-1b': 'https://huggingface.co/facebook/esm-1b/resolve/main/config.json',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class lowercase_ ( __A ):
_lowerCamelCase = 'esm'
def __init__( self , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=768 , lowercase_=12 , lowercase_=12 , lowercase_=3_072 , lowercase_=0.1 , lowercase_=0.1 , lowercase_=1_026 , lowercase_=0.02 , lowercase_=1e-12 , lowercase_="absolute" , lowercase_=True , lowercase_=None , lowercase_=False , lowercase_=False , lowercase_=None , lowercase_=None , **lowercase_ , ):
super().__init__(pad_token_id=lowerCAmelCase_ , mask_token_id=lowerCAmelCase_ , **lowerCAmelCase_ )
_snake_case : str = vocab_size
_snake_case : int = hidden_size
_snake_case : List[Any] = num_hidden_layers
_snake_case : Any = num_attention_heads
_snake_case : Optional[int] = intermediate_size
_snake_case : str = hidden_dropout_prob
_snake_case : Optional[int] = attention_probs_dropout_prob
_snake_case : int = max_position_embeddings
_snake_case : Dict = initializer_range
_snake_case : Dict = layer_norm_eps
_snake_case : Any = position_embedding_type
_snake_case : Any = use_cache
_snake_case : Union[str, Any] = emb_layer_norm_before
_snake_case : List[str] = token_dropout
_snake_case : List[str] = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info("No esmfold_config supplied for folding model, using default values." )
_snake_case : str = EsmFoldConfig()
elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_snake_case : Tuple = EsmFoldConfig(**lowerCAmelCase_ )
_snake_case : int = esmfold_config
if vocab_list is None:
logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!" )
_snake_case : Dict = get_default_vocab_list()
else:
_snake_case : Optional[int] = vocab_list
else:
_snake_case : Optional[int] = None
_snake_case : Union[str, Any] = None
if self.esmfold_config is not None and getattr(self.esmfold_config , "use_esm_attn_map" , lowerCAmelCase_ ):
raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!" )
def UpperCamelCase ( self ):
_snake_case : Optional[Any] = super().to_dict()
if isinstance(self.esmfold_config , lowerCAmelCase_ ):
_snake_case : Union[str, Any] = self.esmfold_config.to_dict()
return output
@dataclass
class lowercase_ :
_lowerCamelCase = None
_lowerCamelCase = True
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = 0
_lowerCamelCase = True
_lowerCamelCase = False
_lowerCamelCase = 128
_lowerCamelCase = None
def UpperCamelCase ( self ):
if self.trunk is None:
_snake_case : List[str] = TrunkConfig()
elif isinstance(self.trunk , lowerCAmelCase_ ):
_snake_case : Dict = TrunkConfig(**self.trunk )
def UpperCamelCase ( self ):
_snake_case : List[Any] = asdict(self )
_snake_case : Dict = self.trunk.to_dict()
return output
@dataclass
class lowercase_ :
_lowerCamelCase = 48
_lowerCamelCase = 1_024
_lowerCamelCase = 128
_lowerCamelCase = 32
_lowerCamelCase = 32
_lowerCamelCase = 32
_lowerCamelCase = 0
_lowerCamelCase = 0
_lowerCamelCase = False
_lowerCamelCase = 4
_lowerCamelCase = 128
_lowerCamelCase = None
def UpperCamelCase ( self ):
if self.structure_module is None:
_snake_case : int = StructureModuleConfig()
elif isinstance(self.structure_module , lowerCAmelCase_ ):
_snake_case : Optional[Any] = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(f"""`max_recycles` should be positive, got {self.max_recycles}.""" )
if self.sequence_state_dim % self.sequence_head_width != 0:
raise ValueError(
"`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
f""" {self.sequence_state_dim} and {self.sequence_head_width}.""" )
if self.pairwise_state_dim % self.pairwise_head_width != 0:
raise ValueError(
"`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
f""" {self.pairwise_state_dim} and {self.pairwise_head_width}.""" )
_snake_case : Dict = self.sequence_state_dim // self.sequence_head_width
_snake_case : Tuple = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
"`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
f""" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.""" )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
"`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
f""" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.""" )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(f"""`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.""" )
if self.dropout >= 0.4:
raise ValueError(f"""`dropout` should not be greater than 0.4, got {self.dropout}.""" )
def UpperCamelCase ( self ):
_snake_case : Optional[int] = asdict(self )
_snake_case : int = self.structure_module.to_dict()
return output
@dataclass
class lowercase_ :
_lowerCamelCase = 384
_lowerCamelCase = 128
_lowerCamelCase = 16
_lowerCamelCase = 128
_lowerCamelCase = 12
_lowerCamelCase = 4
_lowerCamelCase = 8
_lowerCamelCase = 0.1
_lowerCamelCase = 8
_lowerCamelCase = 1
_lowerCamelCase = 2
_lowerCamelCase = 7
_lowerCamelCase = 10
_lowerCamelCase = 1E-8
_lowerCamelCase = 1E5
def UpperCamelCase ( self ):
return asdict(self )
def snake_case () -> int:
'''simple docstring'''
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
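# The head/width consistency rule enforced by TrunkConfig above, in isolation
# (toy values): the state dim must factor exactly into num_heads * head_width.
_state_dim, _head_width = 1_024, 32
_num_heads = _state_dim // _head_width
assert _state_dim == _num_heads * _head_width  # 32 heads of width 32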
| 670
|
"""simple docstring"""
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class UpperCamelCase_ (enum.Enum ):
__magic_name__ = 0
__magic_name__ = 1
__magic_name__ = 2
@add_end_docstrings(__A )
class UpperCamelCase_ (__A ):
__magic_name__ = '''
In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
begging for his blessing. <eod> </s> <eos>
'''
def __init__( self : List[Any] , *lowerCAmelCase_ : List[str] , **lowerCAmelCase_ : List[Any] ) -> Optional[int]:
super().__init__(*lowerCAmelCase_ , **lowerCAmelCase_ )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
UpperCAmelCase_ : Any = None
if self.model.config.prefix is not None:
UpperCAmelCase_ : Any = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
UpperCAmelCase_ : Optional[int] = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self._sanitize_parameters(prefix=lowerCAmelCase_ , **self._forward_params )
UpperCAmelCase_ : List[Any] = {**self._preprocess_params, **preprocess_params}
UpperCAmelCase_ : Optional[int] = {**self._forward_params, **forward_params}
def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase_ : int=None , lowerCAmelCase_ : Optional[Any]=None , lowerCAmelCase_ : int=None , lowerCAmelCase_ : Optional[int]=None , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : Tuple=None , lowerCAmelCase_ : List[str]=None , **lowerCAmelCase_ : Optional[Any] , ) -> int:
UpperCAmelCase_ : Union[str, Any] = {}
if prefix is not None:
UpperCAmelCase_ : Tuple = prefix
if prefix:
UpperCAmelCase_ : Optional[Any] = self.tokenizer(
lowerCAmelCase_ , padding=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_tensors=self.framework )
UpperCAmelCase_ : List[str] = prefix_inputs["input_ids"].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
f"""{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"""
" [None, 'hole']" )
UpperCAmelCase_ : Dict = handle_long_generation
preprocess_params.update(lowerCAmelCase_ )
UpperCAmelCase_ : Dict = generate_kwargs
UpperCAmelCase_ : Dict = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError("`return_text` is mutually exclusive with `return_full_text`" )
if return_tensors is not None:
raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`" )
UpperCAmelCase_ : Tuple = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError("`return_text` is mutually exclusive with `return_tensors`" )
UpperCAmelCase_ : int = ReturnType.TENSORS
if return_type is not None:
UpperCAmelCase_ : int = return_type
if clean_up_tokenization_spaces is not None:
UpperCAmelCase_ : Tuple = clean_up_tokenization_spaces
if stop_sequence is not None:
UpperCAmelCase_ : Union[str, Any] = self.tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
if len(lowerCAmelCase_ ) > 1:
warnings.warn(
"Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
" the stop sequence will be used as the stop sequence string in the interim." )
UpperCAmelCase_ : Optional[int] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , *lowerCAmelCase_ : Dict , **lowerCAmelCase_ : Dict ) -> Union[str, Any]:
# Parse arguments
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({"add_space_before_punct_symbol": True} )
return super()._parse_and_tokenize(*lowerCAmelCase_ , **lowerCAmelCase_ )
def __call__( self : List[Any] , lowerCAmelCase_ : Any , **lowerCAmelCase_ : Union[str, Any] ) -> Dict:
return super().__call__(lowerCAmelCase_ , **lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[str]="" , lowerCAmelCase_ : Any=None , **lowerCAmelCase_ : Optional[Any] ) -> Dict:
UpperCAmelCase_ : Tuple = self.tokenizer(
prefix + prompt_text , padding=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_tensors=self.framework )
UpperCAmelCase_ : Any = prompt_text
if handle_long_generation == "hole":
UpperCAmelCase_ : Optional[Any] = inputs["input_ids"].shape[-1]
if "max_new_tokens" in generate_kwargs:
UpperCAmelCase_ : Dict = generate_kwargs["max_new_tokens"]
else:
UpperCAmelCase_ : List[str] = generate_kwargs.get("max_length" , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError("We cannot infer how many new tokens are expected" )
if cur_len + new_tokens > self.tokenizer.model_max_length:
UpperCAmelCase_ : Tuple = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
"We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
" models max length" )
UpperCAmelCase_ : Dict = inputs["input_ids"][:, -keep_length:]
if "attention_mask" in inputs:
UpperCAmelCase_ : Union[str, Any] = inputs["attention_mask"][:, -keep_length:]
return inputs
    def _forward(self, model_inputs, **generate_kwargs):
        input_ids = model_inputs["input_ids"]
        attention_mask = model_inputs.get("attention_mask", None)
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop("prompt_text")

        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        prefix_length = generate_kwargs.pop("prefix_length", 0)
        if prefix_length > 0:
            has_max_new_tokens = "max_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                generate_kwargs["max_length"] = generate_kwargs.get("max_length") or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            has_min_new_tokens = "min_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length

        # BS x SL
        generated_sequence = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs)
        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:])
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:]))
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
    def postprocess(self, model_outputs, return_type=ReturnType.FULL_TEXT, clean_up_tokenization_spaces=True):
        generated_sequence = model_outputs["generated_sequence"][0]
        input_ids = model_outputs["input_ids"]
        prompt_text = model_outputs["prompt_text"]
        generated_sequence = generated_sequence.numpy().tolist()
        records = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                record = {"generated_token_ids": sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                text = self.tokenizer.decode(
                    sequence,
                    skip_special_tokens=True,
                    clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                )

                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    prompt_length = 0
                else:
                    prompt_length = len(
                        self.tokenizer.decode(
                            input_ids[0],
                            skip_special_tokens=True,
                            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                        )
                    )

                if return_type == ReturnType.FULL_TEXT:
                    all_text = prompt_text + text[prompt_length:]
                else:
                    all_text = text[prompt_length:]

                record = {"generated_text": all_text}
            records.append(record)

        return records
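
# Usage sketch (illustrative, not part of the original file): the three methods above
# are the stages that `pipeline("text-generation")` runs in order — `preprocess`
# tokenizes the prompt, `_forward` calls `model.generate`, and `postprocess` strips
# the prompt before returning `generated_text`. A minimal, hedged example through the
# public API (assumes the standard `transformers.pipeline` factory; the model is
# downloaded on first use):
#
#     from transformers import pipeline
#
#     generator = pipeline("text-generation", model="gpt2")
#     out = generator("Hello, I'm a language model,", max_new_tokens=20)
#     print(out[0]["generated_text"])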
| 95
| 0
|
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
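
# Note (illustrative, not from the original test file): `get_expected_values` mirrors
# shortest-edge resizing — the smaller image side is scaled to size["shortest_edge"]
# and the other side keeps the aspect ratio. A standalone sketch of the arithmetic:
#
#     def shortest_edge_resize(w: int, h: int, shortest_edge: int = 18) -> tuple:
#         if w < h:
#             return int(shortest_edge * h / w), shortest_edge  # (height, width)
#         if w > h:
#             return shortest_edge, int(shortest_edge * w / h)
#         return shortest_edge, shortest_edge
#
#     # e.g. a 30x400 image with shortest_edge=18 resizes to (240, 18)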
@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DetaImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_shape)
        expected_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DetaImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_shape)
        expected_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 366
|
"""simple docstring"""
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3


class FailedTestError(RuntimeError):
    pass


def gen(shards: List[str]):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}


def main():
    rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])

    parser = ArgumentParser()
    parser.add_argument("--streaming", type=bool)
    parser.add_argument("--local_rank", type=int)
    parser.add_argument("--num_workers", type=int, default=0)
    args = parser.parse_args()

    streaming = args.streaming
    num_workers = args.num_workers

    gen_kwargs = {"shards": [f"shard_{shard_idx}" for shard_idx in range(NUM_SHARDS)]}
    ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
    if not streaming:
        ds = Dataset.from_list(list(ds))

    ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)

    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size))

    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(f"local_size {local_size} != expected_local_size {expected_local_size}")
if __name__ == "__main__":
main()
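
# Context sketch (illustrative, not from the original script): this file is meant to
# be launched once per rank so each process sees only its share of the dataset, e.g.
#
#     torchrun --nproc_per_node=2 this_script.py --streaming True
#
# (`this_script.py` is a placeholder name). The core call is
# `split_dataset_by_node(ds, rank=..., world_size=...)`, which assigns each rank a
# disjoint subset — by whole shards when the shard count divides evenly across
# ranks, otherwise by skipping examples.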
| 366
| 1
|
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 483
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pegasus-large": "https://huggingface.co/google/pegasus-large/resolve/main/config.json",
    # See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class PegasusConfig(PretrainedConfig):
    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=0,
        scale_embedding=False,
        pad_token_id=0,
        eos_token_id=1,
        forced_eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
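
# Usage sketch (illustrative, not part of the original module): constructing a
# smaller Pegasus configuration and reading the mapped attributes. `hidden_size` and
# `num_attention_heads` resolve through `attribute_map` to `d_model` and
# `encoder_attention_heads`.
#
#     config = PegasusConfig(d_model=512, encoder_layers=6, decoder_layers=6)
#     assert config.hidden_size == 512
#     assert config.num_attention_heads == config.encoder_attention_heads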
| 116
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_nezha""": ["""NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """NezhaConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nezha"] = [
"""NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NezhaForNextSentencePrediction""",
"""NezhaForMaskedLM""",
"""NezhaForPreTraining""",
"""NezhaForMultipleChoice""",
"""NezhaForQuestionAnswering""",
"""NezhaForSequenceClassification""",
"""NezhaForTokenClassification""",
"""NezhaModel""",
"""NezhaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 712
|
import unittest
from transformers.testing_utils import require_bs4
from transformers.utils import is_bs4_available

from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin

if is_bs4_available():
from transformers import MarkupLMFeatureExtractor
class MarkupLMFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent):
        self.parent = parent

    def prepare_feat_extract_dict(self):
        return {}


def get_html_strings():
    html_string_1 = "<HTML>\n\n <HEAD>\n <TITLE>sample document</TITLE>\n </HEAD>\n\n <BODY BGCOLOR=\"FFFFFF\">\n <HR>\n <a href=\"http://google.com\">Goog</a>\n <H1>This is one header</H1>\n <H2>This is a another Header</H2>\n <P>Travel from\n <P>\n <B>SFO to JFK</B>\n <BR>\n <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>\n <HR>\n <div style=\"color:#0000FF\">\n <h3>Traveler <b> name </b> is\n <p> John Doe </p>\n </div>"

    html_string_2 = "\n <!DOCTYPE html>\n <html>\n <body>\n\n <h1>My First Heading</h1>\n <p>My first paragraph.</p>\n\n </body>\n </html>\n "

    return [html_string_1, html_string_2]
@require_bs4
class MarkupLMFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
    feature_extraction_class = MarkupLMFeatureExtractor if is_bs4_available() else None

    def setUp(self):
        self.feature_extract_tester = MarkupLMFeatureExtractionTester(self)

    @property
    def feat_extract_dict(self):
        return self.feature_extract_tester.prepare_feat_extract_dict()

    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class()

        # Test not batched input
        html_string = get_html_strings()[0]
        encoding = feature_extractor(html_string)

        # fmt: off
        expected_nodes = [["sample document", "Goog", "This is one header", "This is a another Header", "Travel from", "SFO to JFK", "on May 2, 2015 at 2:00 pm. For details go to confirm.com", "Traveler", "name", "is", "John Doe"]]
        expected_xpaths = [["/html/head/title", "/html/body/a", "/html/body/h1", "/html/body/h2", "/html/body/p", "/html/body/p/p/b[1]", "/html/body/p/p/b[2]/i", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/b", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/p"]]
        # fmt: on

        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)

        # Test batched
        html_strings = get_html_strings()
        encoding = feature_extractor(html_strings)

        # fmt: off
        expected_nodes = expected_nodes + [["My First Heading", "My first paragraph."]]
        expected_xpaths = expected_xpaths + [["/html/body/h1", "/html/body/p"]]
        # fmt: on

        self.assertEqual(len(encoding.nodes), 2)
        self.assertEqual(len(encoding.xpaths), 2)

        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)
| 589
| 0
|
"""simple docstring"""
def prime_sieve_eratosthenes(num):
    """Sieve of Eratosthenes: return all primes up to and including num."""
    if num <= 0:
        raise ValueError("Input must be a positive integer")

    primes = [True] * (num + 1)

    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1

    return [prime for prime in range(2, num + 1) if primes[prime]]
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_num = int(input("Enter a positive integer: ").strip())
print(prime_sieve_eratosthenes(user_num))
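
# Quick example (illustrative): primes up to 30.
# >>> prime_sieve_eratosthenes(30)
# [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]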
| 341
|
'''simple docstring'''
def is_automorphic_number(number: int) -> bool:
    """An automorphic number is one whose square ends in the number itself."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
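
# Examples (illustrative): 76 is automorphic because 76**2 == 5776 ends in 76,
# while 7 is not because 7**2 == 49 does not end in 7.
# >>> is_automorphic_number(76)
# True
# >>> is_automorphic_number(7)
# False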
| 494
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-06, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
| 599
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"],
"tokenization_convbert": ["ConvBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["ConvBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convbert"] = [
"CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConvBertForMaskedLM",
"ConvBertForMultipleChoice",
"ConvBertForQuestionAnswering",
"ConvBertForSequenceClassification",
"ConvBertForTokenClassification",
"ConvBertLayer",
"ConvBertModel",
"ConvBertPreTrainedModel",
"load_tf_weights_in_convbert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convbert"] = [
"TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFConvBertForMaskedLM",
"TFConvBertForMultipleChoice",
"TFConvBertForQuestionAnswering",
"TFConvBertForSequenceClassification",
"TFConvBertForTokenClassification",
"TFConvBertLayer",
"TFConvBertModel",
"TFConvBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 599
| 1
|
'''simple docstring'''
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class AudioDiffusionPipeline(DiffusionPipeline):
    _optional_components = ["vqvae"]

    def __init__(
        self,
        vqvae: AutoencoderKL,
        unet: UNet2DConditionModel,
        mel: Mel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
    ):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae)

    def get_default_steps(self) -> int:
        """Returns the default number of steps recommended for inference."""
        return 50 if isinstance(self.scheduler, DDIMScheduler) else 1000
    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        audio_file: str = None,
        raw_audio: np.ndarray = None,
        slice: int = 0,
        start_step: int = 0,
        steps: int = None,
        generator: torch.Generator = None,
        mask_start_secs: float = 0,
        mask_end_secs: float = 0,
        step_generator: torch.Generator = None,
        eta: float = 0,
        noise: torch.Tensor = None,
        encoding: torch.Tensor = None,
        return_dict=True,
    ):
        steps = steps or self.get_default_steps()
        self.scheduler.set_timesteps(steps)
        step_generator = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size) == int:
            self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            noise = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ),
                generator=generator,
                device=self.device,
            )
        images = noise
        mask = None

        if audio_file is not None or raw_audio is not None:
            self.mel.load_audio(audio_file, raw_audio)
            input_image = self.mel.audio_slice_to_image(slice)
            input_image = np.frombuffer(input_image.tobytes(), dtype="uint8").reshape(
                (input_image.height, input_image.width)
            )
            input_image = (input_image / 255) * 2 - 1
            input_images = torch.tensor(input_image[np.newaxis, :, :], dtype=torch.float).to(self.device)

            if self.vqvae is not None:
                input_images = self.vqvae.encode(torch.unsqueeze(input_images, 0)).latent_dist.sample(
                    generator=generator
                )[0]
                input_images = self.vqvae.config.scaling_factor * input_images

            if start_step > 0:
                images[0, 0] = self.scheduler.add_noise(input_images, noise, self.scheduler.timesteps[start_step - 1])

            pixels_per_second = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            mask_start = int(mask_start_secs * pixels_per_second)
            mask_end = int(mask_end_secs * pixels_per_second)
            mask = self.scheduler.add_noise(input_images, noise, torch.tensor(self.scheduler.timesteps[start_step:]))

        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
            if isinstance(self.unet, UNet2DConditionModel):
                model_output = self.unet(images, t, encoding)["sample"]
            else:
                model_output = self.unet(images, t)["sample"]

            if isinstance(self.scheduler, DDIMScheduler):
                images = self.scheduler.step(
                    model_output=model_output,
                    timestep=t,
                    sample=images,
                    eta=eta,
                    generator=step_generator,
                )["prev_sample"]
            else:
                images = self.scheduler.step(
                    model_output=model_output,
                    timestep=t,
                    sample=images,
                    generator=step_generator,
                )["prev_sample"]

            if mask is not None:
                if mask_start > 0:
                    images[:, :, :, :mask_start] = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    images[:, :, :, -mask_end:] = mask[:, step, :, -mask_end:]

        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            images = 1 / self.vqvae.config.scaling_factor * images
            images = self.vqvae.decode(images)["sample"]

        images = (images / 2 + 0.5).clamp(0, 1)
        images = images.cpu().permute(0, 2, 3, 1).numpy()
        images = (images * 255).round().astype("uint8")
        images = list(
            (Image.fromarray(_[:, :, 0]) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_, mode="RGB").convert("L") for _ in images)
        )

        audios = [self.mel.image_to_audio(_) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)

        return BaseOutput(**AudioPipelineOutput(np.array(audios)[:, np.newaxis, :]), **ImagePipelineOutput(images))
    @torch.no_grad()
    def encode(self, images: List[Image.Image], steps: int = 50) -> np.ndarray:
        """Reverse (DDIM) step process: recover the noisy image that generates a given image."""
        assert isinstance(self.scheduler, DDIMScheduler)
        self.scheduler.set_timesteps(steps)
        sample = np.array(
            [np.frombuffer(image.tobytes(), dtype="uint8").reshape((1, image.height, image.width)) for image in images]
        )
        sample = (sample / 255) * 2 - 1
        sample = torch.Tensor(sample).to(self.device)

        for t in self.progress_bar(torch.flip(self.scheduler.timesteps, (0,))):
            prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
            alpha_prod_t_prev = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            beta_prod_t = 1 - alpha_prod_t
            model_output = self.unet(sample, t)["sample"]
            pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            sample = sample * alpha_prod_t**0.5 + beta_prod_t**0.5 * model_output

        return sample
    @staticmethod
    def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
        """Spherical linear interpolation between two tensors."""
        theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
        return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
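
# Note (illustrative, not part of the original file): `slerp` interpolates along the
# great circle between two latents, which preserves norm better than linear
# interpolation. A minimal sketch of walking between two noise tensors:
#
#     x0, x1 = torch.randn(1, 1, 64, 64), torch.randn(1, 1, 64, 64)
#     frames = [AudioDiffusionPipeline.slerp(x0, x1, a) for a in (0.0, 0.25, 0.5, 0.75, 1.0)]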
| 78
|
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class TestTokenizationMvp(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MvpTokenizer
    rust_tokenizer_class = MvpTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_filter = filter_roberta_detectors

    def setUp(self):
        super().setUp()
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return MvpTokenizer.from_pretrained("RUCAIBox/mvp")

    @cached_property
    def default_tokenizer_fast(self):
        return MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)
            # Test that special tokens are reset

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            # check if input_ids are returned and no labels
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 1024))

    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = inputs["labels"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
| 326
| 0
|
from __future__ import annotations
import bisect
def bisect_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    if hi < 0:
        hi = len(sorted_collection)

    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid

    return lo


def bisect_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    if hi < 0:
        hi = len(sorted_collection)

    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid

    return lo


def insort_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)


def insort_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)


def binary_search(sorted_collection: list[int], item: int) -> int | None:
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None


def binary_search_std_lib(sorted_collection: list[int], item: int) -> int | None:
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None


def binary_search_by_recursion(sorted_collection: list[int], item: int, left: int, right: int) -> int | None:
    if right < left:
        return None

    midpoint = left + (right - left) // 2

    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = sorted(int(item) for item in user_input.split(","))
    target = int(input("Enter a single number to be found in the list:\n"))
    result = binary_search(collection, target)
if result is None:
print(f'''{target} was not found in {collection}.''')
else:
print(f'''{target} was found at position {result} in {collection}.''')
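
# Examples (illustrative): on duplicates, bisect_left returns the first matching
# index and bisect_right the index one past the last match.
# >>> bisect_left([1, 2, 2, 2, 3], 2)
# 1
# >>> bisect_right([1, 2, 2, 2, 3], 2)
# 4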
| 715
|
from string import ascii_uppercase
ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}
def decimal_to_any(num: int, base: int) -> str:
    """Convert a positive decimal integer to a string in any base from 2 to 36."""
    if isinstance(num, float):
        raise TypeError("int() can't convert non-string with explicit base")
    if num < 0:
        raise ValueError("parameter must be positive int")
    if isinstance(base, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if isinstance(base, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if base in (0, 1):
        raise ValueError("base must be >= 2")
    if base > 36:
        raise ValueError("base must be <= 36")
    new_value = ""
    mod = 0
    div = 0
    while div != 1:
        div, mod = divmod(num, base)
        if base >= 11 and 9 < mod < 36:
            actual_value = ALPHABET_VALUES[str(mod)]
        else:
            actual_value = str(mod)
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1])
        elif div == 1:
            new_value += str(div)
            return str(new_value[::-1])
    return new_value[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 37):
for num in range(1_000):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
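
# Examples (illustrative):
# >>> decimal_to_any(255, 16)
# 'FF'
# >>> decimal_to_any(9, 2)
# '1001'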
| 45
| 0
|
"""simple docstring"""
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def notebook_launcher(function, args=(), num_processes=None, mixed_precision="no", use_port="29500"):
    """Launch a training function from a notebook, on TPU, multi-GPU, one GPU, MPS or CPU."""
    # Are we in a google colab or a Kaggle Kernel?
    in_colab = False
    in_kaggle = False
    if any(key.startswith("KAGGLE") for key in os.environ.keys()):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = "google.colab" in str(sys.modules["IPython"].get_ipython())

    try:
        mixed_precision = PrecisionType(mixed_precision.lower())
    except ValueError:
        raise ValueError(
            f"Unknown mixed_precision mode: {mixed_precision}. Choose between {PrecisionType.list()}."
        )

    if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME", None) is not None):
        # TPU launch
        import torch_xla.distributed.xla_multiprocessing as xmp

        if len(AcceleratorState._shared_state) > 0:
            raise ValueError(
                "To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
                "your training function. Restart your notebook and make sure no cells initializes an "
                "`Accelerator`."
            )
        if num_processes is None:
            num_processes = 8

        launcher = PrepareForLaunch(function, distributed_type="TPU")
        print(f"Launching a training on {num_processes} TPU cores.")
        xmp.spawn(launcher, args=args, nprocs=num_processes, start_method="fork")
    elif in_colab:
        # No need for a distributed launch otherwise as it's either CPU or one GPU.
        if torch.cuda.is_available():
            print("Launching training on one GPU.")
        else:
            print("Launching training on one CPU.")
        function(*args)
    else:
        if num_processes is None:
            raise ValueError(
                "You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call."
            )

        if num_processes > 1:
            # Multi-GPU launch
            from torch.multiprocessing import start_processes
            from torch.multiprocessing.spawn import ProcessRaisedException

            if len(AcceleratorState._shared_state) > 0:
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized "
                    "inside your training function. Restart your notebook and make sure no cells initializes an "
                    "`Accelerator`."
                )

            if torch.cuda.is_initialized():
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, you need to avoid running any instruction "
                    "using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA "
                    "function."
                )

            # torch.distributed will expect a few environment variable to be here. We set the ones common to each
            # process here (the other ones will be set be the launcher).
            with patch_environment(
                world_size=num_processes, master_addr="127.0.0.1", master_port=use_port, mixed_precision=mixed_precision
            ):
                launcher = PrepareForLaunch(function, distributed_type="MULTI_GPU")
                print(f"Launching training on {num_processes} GPUs.")
                try:
                    start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
                except ProcessRaisedException as e:
                    if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
                        raise RuntimeError(
                            "CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. "
                            "This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
                            "Please review your imports and test them when running the `notebook_launcher()` to identify "
                            "which one is problematic."
                        ) from e
        else:
            # No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
            if is_mps_available():
                os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
                print("Launching training on MPS.")
            elif torch.cuda.is_available():
                print("Launching training on one GPU.")
            else:
                print("Launching training on CPU.")
            function(*args)


def debug_launcher(function, args=(), num_processes=2):
    """Launch a training function using several processes on CPU for debugging purposes."""
    from torch.multiprocessing import start_processes

    with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variable to be here. We set the ones common to each
        # process here (the other ones will be set be the launcher).
        with patch_environment(
            world_size=num_processes,
            master_addr="127.0.0.1",
            master_port="29500",
            accelerate_mixed_precision="no",
            accelerate_debug_rdv_file=tmp_file.name,
            accelerate_use_cpu="yes",
        ):
            launcher = PrepareForLaunch(function, debug=True)
            start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
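
# Usage sketch (illustrative, not part of the original module): in a notebook,
# define the training function in a cell and hand it to the launcher; the
# `Accelerator` must be created *inside* that function.
#
#     def training_function():
#         from accelerate import Accelerator
#         accelerator = Accelerator()
#         ...  # build model, dataloaders, optimizer and train
#
#     notebook_launcher(training_function, args=(), num_processes=2)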
| 522
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "YituTech/conv-bert-base": 512,
    "YituTech/conv-bert-medium-small": 512,
    "YituTech/conv-bert-small": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}
class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
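
# Usage sketch (illustrative, not part of the original module): the pair methods
# follow the BERT scheme — `[CLS] A [SEP]` maps to token type 0 and `B [SEP]` to
# token type 1.
#
#     tok = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
#     enc = tok("first sentence", "second sentence")
#     # enc["token_type_ids"]: zeros over "[CLS] first sentence [SEP]",
#     # ones over "second sentence [SEP]"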
| 522
| 1
|
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxCrossAttnUpBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
    FlaxUpBlock2D,
)
@flax.struct.dataclass
class FlaxUNet2DConditionOutput(BaseOutput ):
    sample: jnp.ndarray
@flax_register_to_config
class FlaxUNet2DConditionModel(nn.Module , FlaxModelMixin , ConfigMixin ):
    sample_size: int = 32
    in_channels: int = 4
    out_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    use_memory_efficient_attention: bool = False
    def init_weights( self , rng: jax.random.KeyArray ) -> FrozenDict:
        """simple docstring"""
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape , dtype=jnp.float32 )
        timesteps = jnp.ones((1,) , dtype=jnp.int32 )
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.float32 )
        params_rng , dropout_rng = jax.random.split(rng )
        rngs = {'''params''': params_rng, '''dropout''': dropout_rng}
        return self.init(rngs , sample , timesteps , encoder_hidden_states )["params"]
    def setup( self ) -> None:
        """simple docstring"""
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4
        if self.num_attention_heads is not None:
            raise ValueError(
                '''At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.''' )
        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim
        # input
        self.conv_in = nn.Conv(
            block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim , dtype=self.dtype )
        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention , bool ):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types )
        if isinstance(num_attention_heads , int ):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types )
        # down
        down_blocks = []
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(self.down_block_types ):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels ) - 1
            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel , out_channels=output_channel , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel , out_channels=output_channel , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
            down_blocks.append(down_block )
        self.down_blocks = down_blocks
        # mid
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
        # up
        up_blocks = []
        reversed_block_out_channels = list(reversed(block_out_channels ) )
        reversed_num_attention_heads = list(reversed(num_attention_heads ) )
        only_cross_attention = list(reversed(only_cross_attention ) )
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(self.up_block_types ):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            input_channel = reversed_block_out_channels[min(i + 1 , len(reversed_block_out_channels ) - 1 )]
            is_final_block = i == len(reversed_block_out_channels ) - 1
            if up_block_type == "CrossAttnUpBlock2D":
                up_block = FlaxCrossAttnUpBlock2D(
                    in_channels=input_channel , out_channels=output_channel , prev_output_channel=prev_output_channel , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            else:
                up_block = FlaxUpBlock2D(
                    in_channels=input_channel , out_channels=output_channel , prev_output_channel=prev_output_channel , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
            up_blocks.append(up_block )
            prev_output_channel = output_channel
        self.up_blocks = up_blocks
        # out
        self.conv_norm_out = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
        self.conv_out = nn.Conv(
            self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
    def __call__( self , sample , timesteps , encoder_hidden_states , down_block_additional_residuals=None , mid_block_additional_residual=None , return_dict: bool = True , train: bool = False , ) -> Union[FlaxUNet2DConditionOutput, Tuple]:
        """simple docstring"""
        # 1. time
        if not isinstance(timesteps , jnp.ndarray ):
            timesteps = jnp.array([timesteps] , dtype=jnp.int32 )
        elif isinstance(timesteps , jnp.ndarray ) and len(timesteps.shape ) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32 )
            timesteps = jnp.expand_dims(timesteps , 0 )
        t_emb = self.time_proj(timesteps )
        t_emb = self.time_embedding(t_emb )
        # 2. pre-process
        sample = jnp.transpose(sample , (0, 2, 3, 1) )
        sample = self.conv_in(sample )
        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block , FlaxCrossAttnDownBlock2D ):
                sample, res_samples = down_block(sample , t_emb , encoder_hidden_states , deterministic=not train )
            else:
                sample, res_samples = down_block(sample , t_emb , deterministic=not train )
            down_block_res_samples += res_samples
        if down_block_additional_residuals is not None:
            new_down_block_res_samples = ()
            for down_block_res_sample, down_block_additional_residual in zip(
                down_block_res_samples , down_block_additional_residuals ):
                down_block_res_sample += down_block_additional_residual
                new_down_block_res_samples += (down_block_res_sample,)
            down_block_res_samples = new_down_block_res_samples
        # 4. mid
        sample = self.mid_block(sample , t_emb , encoder_hidden_states , deterministic=not train )
        if mid_block_additional_residual is not None:
            sample += mid_block_additional_residual
        # 5. up
        for up_block in self.up_blocks:
            res_samples = down_block_res_samples[-(self.layers_per_block + 1) :]
            down_block_res_samples = down_block_res_samples[: -(self.layers_per_block + 1)]
            if isinstance(up_block , FlaxCrossAttnUpBlock2D ):
                sample = up_block(
                    sample , temb=t_emb , encoder_hidden_states=encoder_hidden_states , res_hidden_states_tuple=res_samples , deterministic=not train , )
            else:
                sample = up_block(sample , temb=t_emb , res_hidden_states_tuple=res_samples , deterministic=not train )
        # 6. post-process
        sample = self.conv_norm_out(sample )
        sample = nn.silu(sample )
        sample = self.conv_out(sample )
        sample = jnp.transpose(sample , (0, 3, 1, 2) )
        if not return_dict:
            return (sample,)
        return FlaxUNet2DConditionOutput(sample=sample )
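# A minimal initialization sketch (added illustration; the default config is a
# full Stable-Diffusion-sized UNet, so this is memory-hungry):
#
#     unet = FlaxUNet2DConditionModel()
#     params = unet.init_weights(jax.random.PRNGKey(0))
#     sample = jnp.zeros((1, 4, 32, 32), dtype=jnp.float32)
#     timestep = jnp.array([10], dtype=jnp.int32)
#     encoder_hidden_states = jnp.zeros((1, 77, 1280), dtype=jnp.float32)
#     out = unet.apply({"params": params}, sample, timestep, encoder_hidden_states)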
| 596
|
# A Bipartite Graph is a graph whose vertices can be divided into two independent
# sets, U and V, such that every edge (u, v) connects a vertex from U to V or a
# vertex from V to U. In other words, for every edge (u, v), either u belongs to
# U and v to V, or u belongs to V and v to U. Equivalently, no edge connects two
# vertices of the same set.
def check_bipartite_dfs( graph ) -> bool:
    '''simple docstring'''
    visited = [False] * len(graph )
    color = [-1] * len(graph )
    def dfs(v , c ):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u , 1 - c )
    for i in range(len(graph ) ):
        if not visited[i]:
            dfs(i , 0 )
    for i in range(len(graph ) ):
        for j in graph[i]:
            if color[i] == color[j]:
                return False
    return True
# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
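# A quick counterexample (added illustration): a triangle contains an odd cycle,
# so it is not bipartite and the same check returns False.
odd_cycle = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
print(check_bipartite_dfs(odd_cycle))  # False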
| 596
| 1
|
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class FlavaProcessor( ProcessorMixin ):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'FlavaImageProcessor'
    tokenizer_class = ('BertTokenizer', 'BertTokenizerFast')
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        """simple docstring"""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.' , FutureWarning , )
            feature_extractor = kwargs.pop('feature_extractor' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.' )
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.' )
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
    def __call__( self , images: Optional[ImageInput] = None , text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None , add_special_tokens: bool = True , padding: Union[bool, str, PaddingStrategy] = False , truncation: Union[bool, str, TruncationStrategy] = False , max_length: Optional[int] = None , stride: int = 0 , pad_to_multiple_of: Optional[int] = None , return_image_mask: Optional[bool] = None , return_codebook_pixels: Optional[bool] = None , return_token_type_ids: Optional[bool] = None , return_attention_mask: Optional[bool] = None , return_overflowing_tokens: bool = False , return_special_tokens_mask: bool = False , return_offsets_mapping: bool = False , return_length: bool = False , verbose: bool = True , return_tensors: Optional[Union[str, TensorType]] = None , **kwargs , ):
        """simple docstring"""
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.' )
        if text is not None:
            encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        if images is not None:
            image_features = self.image_processor(
                images , return_image_mask=return_image_mask , return_codebook_pixels=return_codebook_pixels , return_tensors=return_tensors , **kwargs , )
        if text is not None and images is not None:
            encoding.update(image_features )
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def batch_decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self ):
        """simple docstring"""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
    @property
    def feature_extractor_class( self ):
        """simple docstring"""
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor( self ):
        """simple docstring"""
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , FutureWarning , )
        return self.image_processor
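# A minimal usage sketch (assumes the public "facebook/flava-full" checkpoint;
# added illustration, not part of the source):
#
#     from PIL import Image
#     processor = FlavaProcessor.from_pretrained("facebook/flava-full")
#     image = Image.open("cat.png")
#     inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")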
| 618
|
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class FlaxViTModelTester( unittest.TestCase ):
    def __init__( self , parent , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , ) -> None:
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
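    # Worked example of the patch count above (added illustration): with the
    # defaults image_size=30 and patch_size=2, num_patches = (30 // 2) ** 2 = 225,
    # so seq_length = 225 + 1 = 226 including the [CLS] token.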
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        config = ViTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , )
        return config, pixel_values
    def create_and_check_model( self , config , pixel_values ):
        """simple docstring"""
        model = FlaxViTModel(config=config )
        result = model(pixel_values )
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) )
    def create_and_check_for_image_classification( self , config , pixel_values ):
        """simple docstring"""
        config.num_labels = self.type_sequence_label_size
        model = FlaxViTForImageClassification(config=config )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = FlaxViTForImageClassification(config )
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_flax
class FlaxViTModelTest( FlaxModelTesterMixin , unittest.TestCase ):
    all_model_classes = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
    def setUp( self ) -> None:
        """simple docstring"""
        self.model_tester = FlaxViTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ViTConfig , has_text_modality=False , hidden_size=37 )
    def test_config( self ):
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_model( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_image_classification( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    def test_forward_signature( self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.__call__ )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_jit_compilation( self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
                model = model_class(config )
                @jax.jit
                def model_jitted(pixel_values , **kwargs ):
                    return model(pixel_values=pixel_values , **kwargs )
                with self.subTest('JIT Enabled' ):
                    jitted_outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
                with self.subTest('JIT Disabled' ):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(outputs ) , len(jitted_outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
    @slow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('google/vit-base-patch16-224' )
            outputs = model(np.ones((1, 3, 224, 224) ) )
            self.assertIsNotNone(outputs )
| 618
| 1
|
'''simple docstring'''
import argparse
import os
import jax
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder
MODEL = 'base_with_context'
def load_notes_encoder( weights , model ):
lowercase : Dict = nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"] ) )
lowercase : Union[str, Any] = nn.Parameter(
torch.FloatTensor(weights["Embed_0"]["embedding"] ) ,requires_grad=_lowercase )
for lyr_num, lyr in enumerate(model.encoders ):
        ly_weight = weights[F'''layers_{lyr_num}''']
lowercase : List[Any] = nn.Parameter(
torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) )
        attention_weights = ly_weight['''attention''']
lowercase : Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
lowercase : Dict = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
lowercase : Dict = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
lowercase : int = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
lowercase : List[Any] = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
lowercase : List[str] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
lowercase : str = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
lowercase : int = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
lowercase : Optional[Any] = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) )
return model
def load_continuous_encoder( weights , model ):
lowercase : List[str] = nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T ) )
lowercase : Dict = nn.Parameter(
torch.FloatTensor(weights["Embed_0"]["embedding"] ) ,requires_grad=_lowercase )
for lyr_num, lyr in enumerate(model.encoders ):
        ly_weight = weights[F'''layers_{lyr_num}''']
        attention_weights = ly_weight['''attention''']
lowercase : Any = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
lowercase : Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
lowercase : str = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
lowercase : int = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
lowercase : Optional[int] = nn.Parameter(
torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) )
lowercase : Optional[Any] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
lowercase : Any = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
lowercase : List[Any] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
lowercase : Union[str, Any] = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
lowercase : Optional[int] = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) )
return model
def load_decoder( weights , model ):
lowercase : List[Any] = nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T ) )
lowercase : Optional[int] = nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T ) )
lowercase : Dict = nn.Parameter(
torch.FloatTensor(weights["Embed_0"]["embedding"] ) ,requires_grad=_lowercase )
lowercase : List[str] = nn.Parameter(
torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
        ly_weight = weights[F'''layers_{lyr_num}''']
lowercase : List[str] = nn.Parameter(
torch.FloatTensor(ly_weight["pre_self_attention_layer_norm"]["scale"] ) )
lowercase : List[str] = nn.Parameter(
torch.FloatTensor(ly_weight["FiLMLayer_0"]["DenseGeneral_0"]["kernel"].T ) )
        attention_weights = ly_weight['''self_attention''']
lowercase : str = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
lowercase : int = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
lowercase : int = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
lowercase : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
        attention_weights = ly_weight['''MultiHeadDotProductAttention_0''']
lowercase : Dict = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
lowercase : Dict = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
lowercase : int = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
lowercase : Any = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
lowercase : Tuple = nn.Parameter(
torch.FloatTensor(ly_weight["pre_cross_attention_layer_norm"]["scale"] ) )
lowercase : List[Any] = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
lowercase : Tuple = nn.Parameter(
torch.FloatTensor(ly_weight["FiLMLayer_1"]["DenseGeneral_0"]["kernel"].T ) )
lowercase : Tuple = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
lowercase : Optional[int] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
lowercase : Any = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
lowercase : Union[str, Any] = nn.Parameter(torch.FloatTensor(weights["decoder_norm"]["scale"] ) )
lowercase : List[Any] = nn.Parameter(torch.FloatTensor(weights["spec_out_dense"]["kernel"].T ) )
return model
def main( args ) -> None:
    ta_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path )
    ta_checkpoint = jax.tree_util.tree_map(onp.array , ta_checkpoint )
    gin_overrides = [
        '''from __gin__ import dynamic_registration''',
        '''from music_spectrogram_diffusion.models.diffusion import diffusion_utils''',
        '''diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0''',
        '''diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()''',
    ]
    gin_file = os.path.join(args.checkpoint_path , ".." , "config.gin" )
    gin_config = inference.parse_training_gin_file(gin_file , gin_overrides )
    synth_model = inference.InferenceModel(args.checkpoint_path , gin_config )
    scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2" , variance_type="fixed_large" )
    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length["inputs"] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="gated-gelu" , )
    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length["targets_context"] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="gated-gelu" , )
    decoder = TaFilmDecoder(
        input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length["targets_context"] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , )
    notes_encoder = load_notes_encoder(ta_checkpoint["target"]["token_encoder"] , notes_encoder )
    continuous_encoder = load_continuous_encoder(ta_checkpoint["target"]["continuous_encoder"] , continuous_encoder )
    decoder = load_decoder(ta_checkpoint["target"]["decoder"] , decoder )
    melgan = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder" )
    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder , continuous_encoder=continuous_encoder , decoder=decoder , scheduler=scheduler , melgan=melgan , )
    if args.save:
        pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--output_path""", default=None, type=str, required=True, help="""Path to the converted model.""")
parser.add_argument(
"""--save""", default=True, type=bool, required=False, help="""Whether to save the converted model or not."""
)
parser.add_argument(
"""--checkpoint_path""",
default=F'''{MODEL}/checkpoint_500000''',
type=str,
required=False,
help="""Path to the original jax model checkpoint.""",
)
    args = parser.parse_args()
main(args)
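# Example invocation (hypothetical paths and script name, added illustration):
#
#     python convert_music_spectrogram_to_diffusers.py \
#         --checkpoint_path /path/to/base_with_context/checkpoint_500000 \
#         --output_path ./spectrogram_diffusion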
| 712
|
'''simple docstring'''
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn.grep_linear""": """encoder.layers.*.attention.gru_rel_pos_linear""",
"""self_attn.relative_attention_bias""": """encoder.layers.*.attention.rel_attn_embed""",
"""self_attn.grep_a""": """encoder.layers.*.attention.gru_rel_pos_const""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """ctc_proj""",
"""mask_emb""": """masked_spec_embed""",
}
TOP_LEVEL_KEYS = [
"""ctc_proj""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively( hf_pointer , key , value , full_name , weight_type ) -> None:
    for attribute in key.split("." ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
        F''' {value.shape} for {full_name}'''
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def recursively_load_weights( fairseq_model , hf_model ) -> None:
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == "group" , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split("." )[-2]
                        mapped_key = mapped_key.replace("*" , layer_index )
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(F'''Unused weights: {unused_weights}''' )
def load_conv_layer( full_name , value , feature_extractor , unused_weights , use_group_norm ) -> None:
    name = full_name.split("conv_layers." )[-1]
    items = name.split("." )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                F'''{full_name} has size {value.shape}, but {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was'''
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F'''Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def convert_wavlm_checkpoint( checkpoint_path , pytorch_dump_folder_path , config_path=None ) -> None:
    # load the pre-trained checkpoints
    checkpoint = torch.load(checkpoint_path )
    cfg = WavLMConfigOrig(checkpoint["cfg"] )
    model = WavLMOrig(cfg )
    model.load_state_dict(checkpoint["model"] )
    model.eval()
    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path )
    else:
        config = WavLMConfig()
    hf_wavlm = WavLMModel(config )
    recursively_load_weights(model , hf_wavlm )
    hf_wavlm.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
    args = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
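# Example invocation (hypothetical paths and script name, added illustration):
#
#     python convert_wavlm_original_pytorch_checkpoint_to_pytorch.py \
#         --checkpoint_path /path/to/WavLM-Base.pt \
#         --pytorch_dump_folder_path ./wavlm-base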
| 425
| 0
|
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class XLMRobertaTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self ) -> None:
        """simple docstring"""
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id(self ) -> None:
        """simple docstring"""
        token = """<pad>"""
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab(self ) -> None:
        """simple docstring"""
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , """<s>""" )
        self.assertEqual(vocab_keys[1] , """<pad>""" )
        self.assertEqual(vocab_keys[-1] , """<mask>""" )
        self.assertEqual(len(vocab_keys ) , 1002 )
    def test_vocab_size(self ) -> None:
        """simple docstring"""
        self.assertEqual(self.get_tokenizer().vocab_size , 1002 )
    def test_full_tokenizer(self ) -> None:
        """simple docstring"""
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize("""This is a test""" )
        self.assertListEqual(tokens , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
        tokens = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
    def test_save_pretrained(self ) -> None:
        """simple docstring"""
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return
        self.tokenizers_list[0] = (self.rust_tokenizer_class, """hf-internal-testing/tiny-xlm-roberta""", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
                self.assertSequenceEqual(tokenizer_r_files , tokenizer_p_files )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
                shutil.rmtree(tmpdirname2 )
                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 , legacy_format=True )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files , tokenizer_p_files )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                shutil.rmtree(tmpdirname2 )
                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 , legacy_format=False )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
                # Checks it saved the tokenizer.json file
                self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                shutil.rmtree(tmpdirname2 )
@cached_property
    def big_tokenizer(self ) -> XLMRobertaTokenizer:
        """simple docstring"""
        return XLMRobertaTokenizer.from_pretrained("""xlm-roberta-base""" )
    def test_picklable_without_disk(self ) -> None:
        """simple docstring"""
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB , f.name )
            tokenizer = XLMRobertaTokenizer(f.name , keep_accents=True )
            pickled_tokenizer = pickle.dumps(tokenizer )
        pickle.loads(pickled_tokenizer )
    def test_rust_and_python_full_tokenizers(self ) -> None:
        """simple docstring"""
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = """I was born in 92000, and this is falsé."""
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
@slow
    def test_tokenization_base_easy_symbols(self ) -> None:
        """simple docstring"""
        symbols = """Hello World!"""
        original_tokenizer_encodings = [0, 3_5378, 6661, 38, 2]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@slow
    def test_tokenization_base_hard_symbols(self ) -> None:
        """simple docstring"""
        symbols = (
            """This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
            """ add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
        )
        original_tokenizer_encodings = [
0,
3293,
83,
10,
4552,
4989,
7986,
678,
10,
5915,
111,
17_9459,
12_4850,
4,
6044,
237,
12,
6,
5,
6,
4,
6780,
705,
15,
1388,
44,
378,
1_0114,
711,
152,
20,
6,
5,
2_2376,
642,
1221,
1_5190,
3_4153,
450,
5608,
959,
1119,
5_7702,
136,
186,
47,
1098,
2_9367,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6044,
237,
6284,
5_0901,
528,
31,
90,
34,
927,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@slow
def lowerCamelCase__ (self : str ) -> List[str]:
"""simple docstring"""
lowercase__ = {"""input_ids""": [[0, 1_1062, 8_2772, 7, 15, 8_2772, 538, 5_1529, 237, 1_7198, 1290, 206, 9, 21_5175, 1314, 136, 1_7198, 1290, 206, 9, 5_6359, 42, 12_2009, 9, 1_6466, 16, 8_7344, 4537, 9, 4717, 7_8381, 6, 15_9958, 7, 15, 2_4480, 618, 4, 527, 2_2693, 5428, 4, 2777, 2_4480, 9874, 4, 4_3523, 594, 4, 803, 1_8392, 3_3189, 18, 4, 4_3523, 2_4447, 1_2399, 100, 2_4955, 8_3658, 9626, 14_4057, 15, 839, 2_2335, 16, 136, 2_4955, 8_3658, 8_3479, 15, 3_9102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 12_2009, 11_5774, 23, 805, 1328, 4_6876, 7, 136, 5_3894, 1940, 4_2227, 4_1159, 1_7721, 823, 425, 4, 2_7512, 9_8722, 206, 136, 5531, 4970, 919, 1_7336, 5, 2], [0, 2_0080, 618, 83, 8_2775, 47, 479, 9, 1517, 73, 5_3894, 333, 8_0581, 11_0117, 1_8811, 5256, 1295, 51, 15_2526, 297, 7986, 390, 12_4416, 538, 3_5431, 214, 98, 1_5044, 2_5737, 136, 7108, 4_3701, 23, 756, 13_5355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 6_3773, 11_9455, 6, 14_7797, 8_8203, 7, 645, 70, 21, 3285, 1_0269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_UpperCAmelCase , model_name="""xlm-roberta-base""" , revision="""d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3""" , )
| 15
|
'''simple docstring'''
from collections.abc import Callable
class Heap:
    """simple docstring"""
    def __init__( self , key: Callable | None = None ):
        # Stores actual heap items.
        self.arr: list = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map: dict = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis ordering
        # will be done.
        self.key = key or (lambda x: x)
    def _parent( self , i: int ):
        return int((i - 1) / 2 ) if i > 0 else None
    def _left( self , i: int ):
        left = int(2 * i + 1 )
        return left if 0 < left < self.size else None
    def _right( self , i: int ):
        right = int(2 * i + 2 )
        return right if 0 < right < self.size else None
    def _swap( self , i: int , j: int ):
        # First update the indexes of the items in index map.
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]
    def _cmp( self , i: int , j: int ):
        return self.arr[i][1] < self.arr[j][1]
    def _get_valid_parent( self , i: int ):
        left = self._left(i )
        right = self._right(i )
        valid_parent = i
        if left is not None and not self._cmp(left , valid_parent ):
            valid_parent = left
        if right is not None and not self._cmp(right , valid_parent ):
            valid_parent = right
        return valid_parent
    def _heapify_up( self , index: int ):
        parent = self._parent(index )
        while parent is not None and not self._cmp(index , parent ):
            self._swap(index , parent )
            index, parent = parent, self._parent(parent )
    def _heapify_down( self , index: int ):
        valid_parent = self._get_valid_parent(index )
        while valid_parent != index:
            self._swap(index , valid_parent )
            index, valid_parent = valid_parent, self._get_valid_parent(valid_parent )
    def update_item( self , item: int , item_value: int ):
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value )]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index )
        self._heapify_down(index )
    def delete_item( self , item: int ):
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change- so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index )
            self._heapify_down(index )
    def insert_item( self , item: int , item_value: int ):
        arr_len = len(self.arr )
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value )] )
        else:
            self.arr[self.size] = [item, self.key(item_value )]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1 )
    def get_top( self ):
        return self.arr[0] if self.size else None
    def extract_top( self ):
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0] )
        return top_item_tuple
def _A ( ):
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
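# A minimal usage sketch of the Heap above (added illustration): with the default
# comparison it behaves as a max-heap, keeping the largest value on top.
#
#     h = Heap()
#     h.insert_item(5, 34)
#     h.insert_item(6, 31)
#     h.insert_item(7, 37)
#     print(h.get_top())      # [7, 37]
#     print(h.extract_top())  # [7, 37]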
| 41
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"
),
"distilbert-base-uncased-finetuned-sst-2-english": (
"https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"
),
}
class DistilBertConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = '''distilbert'''
    attribute_map = {
        '''hidden_size''': '''dim''',
        '''num_attention_heads''': '''n_heads''',
        '''num_hidden_layers''': '''n_layers''',
    }
    def __init__( self , vocab_size=3_05_22 , max_position_embeddings=5_12 , sinusoidal_pos_embds=False , n_layers=6 , n_heads=12 , dim=7_68 , hidden_dim=4 * 7_68 , dropout=0.1 , attention_dropout=0.1 , activation="gelu" , initializer_range=0.02 , qa_dropout=0.1 , seq_classif_dropout=0.2 , pad_token_id=0 , **kwargs , ) -> None:
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs , pad_token_id=pad_token_id )
class DistilBertOnnxConfig( OnnxConfig ):
    '''simple docstring'''
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
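# A minimal usage sketch (added illustration): the ONNX config above exposes the
# dynamic axes used at export time.
#
#     config = DistilBertConfig()
#     onnx_config = DistilBertOnnxConfig(config)
#     print(onnx_config.inputs)  # OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}), ...])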
| 607
|
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class UtilsFunctionsTest( unittest.TestCase ):
    '''simple docstring'''
    def test_top_k_top_p_filtering( self ) -> None:
        logits = tf.convert_to_tensor(
[
[
8.2_220_991, # 3rd highest value; idx. 0
-0.5_620_044,
5.23_229_752,
4.0_386_393,
-6.8_798_378,
-0.54_785_802,
-3.2_012_153,
2.92_777_176,
1.88_171_953,
7.35_341_276, # 5th highest value; idx. 9
8.43_207_833, # 2nd highest value; idx. 10
-9.85_711_836,
-5.96_209_236,
-1.13_039_161,
-7.1_115_294,
-0.8_369_633,
-5.3_186_408,
7.06_427_407,
0.81_369_344,
-0.82_023_817,
-5.9_179_796,
0.58_813_443,
-6.99_778_438,
4.71_551_189,
-0.18_771_637,
7.44_020_759, # 4th highest value; idx. 25
9.38_450_987, # 1st highest value; idx. 26
2.12_662_941,
-9.32_562_038,
2.35_652_522,
], # cummulative prob of 5 highest values <= 0.6
[
0.58_425_518,
4.53_139_238,
-5.57_510_464,
-6.28_030_699,
-7.19_529_503,
-4.02_122_551,
1.39_337_037,
-6.06_707_057,
1.59_480_517,
-9.643_119,
0.03_907_799,
0.67_231_762,
-8.88_206_726,
6.27_115_922, # 4th highest value; idx. 13
2.28_520_723,
4.82_767_506,
4.30_421_368,
8.8_275_313, # 2nd highest value; idx. 17
5.44_029_958, # 5th highest value; idx. 18
-4.4_735_794,
7.38_579_536, # 3rd highest value; idx. 20
-2.91_051_663,
2.61_946_077,
-2.5_674_762,
-9.48_959_302,
-4.02_922_645,
-1.35_416_918,
9.67_702_323, # 1st highest value; idx. 27
-5.89_478_553,
1.85_370_467,
], # cummulative prob of 5 highest values <= 0.6
        ] , dtype=tf.float32 , )
        expected_non_filtered_idx = tf.convert_to_tensor(
            [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.int32 , )  # expected non-filtered indices as noted above
        expected_non_filtered_values = tf.convert_to_tensor(
            [8.222_099, 7.3_534_126, 8.432_078, 7.4_402_075, 9.38_451, 6.271_159, 8.827_531, 5.4_402_995, 7.3_857_956, 9.677_023] , dtype=tf.float32 , )  # expected non-filtered values as noted above
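        # top_k=10 keeps the 10 largest logits per row, top_p=0.6 then keeps the
        # smallest set of those whose cumulative probability reaches 0.6, and
        # min_tokens_to_keep=4 guarantees at least 4 survivors per row; every
        # filtered position is set to -inf.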
        output = tf_top_k_top_p_filtering(logits , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 )
        non_inf_output = output[output != -float('inf' )]
        non_inf_idx = tf.cast(
            tf.where(tf.not_equal(output , tf.constant(-float('inf' ) , dtype=tf.float32 ) ) ) , dtype=tf.int32 , )
        tf.debugging.assert_near(non_inf_output , expected_non_filtered_values , rtol=1E-12 )
        tf.debugging.assert_equal(non_inf_idx , expected_non_filtered_idx )
@require_tf
class UpperCamelCase_ ( unittest.TestCase , GenerationIntegrationTestsMixin ):
'''simple docstring'''
if is_tf_available():
        framework_dependent_parameters = {
            '''AutoModelForCausalLM''': TFAutoModelForCausalLM,
            '''AutoModelForSpeechSeq2Seq''': TFAutoModelForSpeechSeq2Seq,
            '''AutoModelForSeq2SeqLM''': TFAutoModelForSeq2SeqLM,
            '''AutoModelForVision2Seq''': TFAutoModelForVision2Seq,
'''LogitsProcessorList''': TFLogitsProcessorList,
'''MinLengthLogitsProcessor''': TFMinLengthLogitsProcessor,
'''create_tensor_fn''': tf.convert_to_tensor,
'''floats_tensor''': floats_tensor,
'''return_tensors''': '''tf''',
}
    @slow
    def test_generate_tf_function_export_fixed_input_length( self ):
        # TF-only test: tf.saved_model export
        test_model = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
        input_length = 2
        max_new_tokens = 2

        class DummyModel(tf.Module ):
            def __init__( self , model ):
                super(DummyModel , self ).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((None, input_length) , tf.int32 , name='input_ids' ),
                    tf.TensorSpec((None, input_length) , tf.int32 , name='attention_mask' ),
                ) , jit_compile=True , )
            def serving( self , input_ids , attention_mask ):
                outputs = self.model.generate(
                    input_ids=input_ids , attention_mask=attention_mask , max_new_tokens=max_new_tokens , return_dict_in_generate=True , )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2, 0], [1_02, 1_03]]
        dummy_attention_masks = [[1, 0], [1, 1]]
        dummy_model = DummyModel(model=test_model )
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model , tmp_dir , signatures={'serving_default': dummy_model.serving} )
            serving_func = tf.saved_model.load(tmp_dir ).signatures['serving_default']
            for batch_size in range(1 , len(dummy_input_ids ) + 1 ):
                inputs = {
                    'input_ids': tf.constant(dummy_input_ids[:batch_size] ),
                    'attention_mask': tf.constant(dummy_attention_masks[:batch_size] ),
                }
                tf_func_outputs = serving_func(**inputs )['sequences']
                tf_model_outputs = test_model.generate(**inputs , max_new_tokens=max_new_tokens )
                tf.debugging.assert_equal(tf_func_outputs , tf_model_outputs )
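        # Note: the signature above pins input_length, so the exported model accepts
        # any batch size but only sequences of exactly `input_length` tokens; the
        # next test exports the opposite trade-off (fixed batch, dynamic length).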
    @slow
    def test_generate_tf_function_export_fixed_batch_size( self ):
        # TF-only test: tf.saved_model export
        test_model = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
        batch_size = 1
        max_new_tokens = 2

        class DummyModel(tf.Module ):
            def __init__( self , model ):
                super(DummyModel , self ).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((batch_size, None) , tf.int32 , name='input_ids' ),
                    tf.TensorSpec((batch_size, None) , tf.int32 , name='attention_mask' ),
                ) , jit_compile=True , )
            def serving( self , input_ids , attention_mask ):
                outputs = self.model.generate(
                    input_ids=input_ids , attention_mask=attention_mask , max_new_tokens=max_new_tokens , return_dict_in_generate=True , )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2], [1_02, 1_03]]
        dummy_attention_masks = [[1], [1, 1]]
        dummy_model = DummyModel(model=test_model )
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model , tmp_dir , signatures={'serving_default': dummy_model.serving} )
            serving_func = tf.saved_model.load(tmp_dir ).signatures['serving_default']
            for input_row in range(len(dummy_input_ids ) ):
                inputs = {
                    'input_ids': tf.constant([dummy_input_ids[input_row]] ),
                    'attention_mask': tf.constant([dummy_attention_masks[input_row]] ),
                }
                tf_func_outputs = serving_func(**inputs )['sequences']
                tf_model_outputs = test_model.generate(**inputs , max_new_tokens=max_new_tokens )
                tf.debugging.assert_equal(tf_func_outputs , tf_model_outputs )
    @slow
    @require_tensorflow_text
    def test_generate_tf_function_export_with_tf_tokenizer( self ):
        # TF-only test: tf.saved_model export
        with tempfile.TemporaryDirectory() as tmp_dir:
            # file needed to load the TF tokenizer
            hf_hub_download(repo_id='google/flan-t5-small' , filename='spiece.model' , local_dir=tmp_dir )

            class CompleteSentenceTransformer(tf.keras.layers.Layer ):
                def __init__( self ):
                    super().__init__()
                    self.tokenizer = text.SentencepieceTokenizer(
                        model=tf.io.gfile.GFile(os.path.join(tmp_dir , 'spiece.model' ) , 'rb' ).read() )
                    self.model = TFAutoModelForSeq2SeqLM.from_pretrained('hf-internal-testing/tiny-random-t5' )

                def call( self , inputs , *args , **kwargs ):
                    tokens = self.tokenizer.tokenize(inputs )
                    input_ids , attention_mask = text.pad_model_inputs(
                        tokens , max_seq_length=64 , pad_value=self.model.config.pad_token_id )
                    outputs = self.model.generate(input_ids=input_ids , attention_mask=attention_mask )
                    return self.tokenizer.detokenize(outputs )

            complete_model = CompleteSentenceTransformer()
            inputs = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name='inputs' )
            outputs = complete_model(inputs )
            keras_model = tf.keras.Model(inputs , outputs )
            keras_model.save(tmp_dir )
    def test_eos_token_id_int_and_list_top_k_top_sampling( self ):
        # Has PT equivalent: this test relies on random sampling
        generation_kwargs = {
            'do_sample': True,
            'num_beams': 1,
            'top_p': 0.7,
            'top_k': 10,
            'temperature': 0.7,
        }
        expectation = 14
        tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
        sentence = 'Hello, my dog is cute and'
        tokens = tokenizer(sentence , return_tensors='tf' )
        model = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
        eos_token_id = 6_38
        # forces the generation to happen on CPU, to avoid GPU-related quirks
        with tf.device(':/CPU:0' ):
            tf.random.set_seed(0 )
            generated_tokens = model.generate(**tokens , eos_token_id=eos_token_id , **generation_kwargs )
        self.assertTrue(expectation == len(generated_tokens[0] ) )
        eos_token_id = [6_38, 1_98]
        with tf.device(':/CPU:0' ):
            tf.random.set_seed(0 )
            generated_tokens = model.generate(**tokens , eos_token_id=eos_token_id , **generation_kwargs )
        self.assertTrue(expectation == len(generated_tokens[0] ) )
    def test_model_kwarg_encoder_signature_filtering( self ):
        # Has PT equivalent: ample use of framework-specific code
        bart_tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bart' )
        article = 'Hugging Face is a technology company based in New York and Paris.'
        input_ids = bart_tokenizer(article , return_tensors='tf' ).input_ids
        bart_model = TFBartForConditionalGeneration.from_pretrained('hf-internal-testing/tiny-random-bart' )
        output = bart_model.generate(input_ids ).numpy()

        class FakeBart(TFBartForConditionalGeneration ):
            def call( self , input_ids , foo=None , **kwargs ):
                return super().call(input_ids , **kwargs )

        bart_model = FakeBart.from_pretrained('hf-internal-testing/tiny-random-bart' )
        fake_output = bart_model.generate(input_ids , foo='bar' ).numpy()
        self.assertTrue(np.array_equal(output , fake_output ) )

        class FakeEncoder(bart_model.model.encoder.__class__ ):
            def call( self , input_ids , **kwargs ):
                return super().call(input_ids , **kwargs )

        fake_encoder = FakeEncoder(bart_model.config , bart_model.model.shared )
        bart_model.model.encoder = fake_encoder
        # Normal generation still works (the output will be different because the encoder weights are different)
        output = bart_model.generate(input_ids ).numpy()
        with self.assertRaises(ValueError ):
            # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
            bart_model.generate(input_ids , foo='bar' )
| 607
| 1
|
def search(list_data: list , key: int , left: int = 0 , right: int = 0 ) -> int:
    right = right or len(list_data ) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data , key , left + 1 , right - 1 )
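# Quick sanity check for the two-ended recursive scan above:
# >>> search([1, 4, 6, 8], 6)
# 2
# >>> search([1, 4, 6, 8], 5)
# -1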
if __name__ == "__main__":
import doctest
doctest.testmod()
| 699
|
from collections.abc import Sequence
def max_subarray_sum(arr: Sequence[float] , allow_empty_subarrays: bool = False ) -> float:
    if not arr:
        return 0
    max_sum = 0 if allow_empty_subarrays else float('''-inf''' )
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num , curr_sum + num )
        max_sum = max(max_sum , curr_sum )
    return max_sum
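# Kadane's algorithm: curr_sum tracks the best subarray sum ending at the
# current element, resetting once a negative prefix stops helping. For the
# demo input [-2, 1, -3, 4, -1, 2, 1, -5, 4] the answer is 6 ([4, -1, 2, 1]).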
if __name__ == "__main__":
from doctest import testmod
testmod()
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(f"{max_subarray_sum(nums) = }")
| 431
| 0
|
"""simple docstring"""
_lowerCAmelCase = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def main():
    message = input("""Enter message: """ )
    key = input("""Enter key [alphanumeric]: """ )
    mode = input("""Encrypt/Decrypt [e/d]: """ )
    if mode.lower().startswith("""e""" ):
        mode = """encrypt"""
        translated = encrypt_message(key , message )
    elif mode.lower().startswith("""d""" ):
        mode = """decrypt"""
        translated = decrypt_message(key , message )
    print(F"""\n{mode.title()}ed message:""" )
    print(translated )
def encrypt_message( key , message ):
    return translate_message(key , message , """encrypt""" )
def decrypt_message( key , message ):
    return translate_message(key , message , """decrypt""" )
def translate_message( key , message , mode ):
    translated = []
    key_index = 0
    key = key.upper()
    for symbol in message:
        num = LETTERS.find(symbol.upper() )
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index] )
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index] )
            num %= len(LETTERS )
            if symbol.isupper():
                translated.append(LETTERS[num] )
            elif symbol.islower():
                translated.append(LETTERS[num].lower() )
            key_index += 1
            if key_index == len(key ):
                key_index = 0
        else:
            translated.append(symbol )
    return "".join(translated )
if __name__ == "__main__":
main()
| 480
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger(__name__)
def create_rename_keys(config , base_model=False ):
    rename_keys = []
# fmt: off
# stem:
rename_keys.append(("""cls_token""", """vit.embeddings.cls_token""") )
rename_keys.append(("""pos_embed""", """vit.embeddings.position_embeddings""") )
rename_keys.append(("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias""") )
# backbone
rename_keys.append(("""patch_embed.backbone.stem.conv.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.bias""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias""") )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias""") )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
# fmt: on
return rename_keys
def read_in_q_k_v(state_dict , config , base_model=False ):
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = """"""
        else:
            prefix = """vit."""
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
        in_proj_bias = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
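    # in_proj_weight stacks the query, key and value projections row-wise as a
    # (3 * hidden_size, hidden_size) matrix, so the three equal slices above
    # recover the separate q/k/v weights and biases.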
def remove_classification_head_(state_dict ):
    ignore_keys = ["""head.weight""", """head.bias"""]
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key(dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def prepare_img():
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name , pytorch_dump_folder_path , push_to_hub=False ):
    backbone_config = BitConfig(
        global_padding="""same""" , layer_type="""bottleneck""" , depths=(3, 4, 9) , out_features=["""stage3"""] , embedding_dynamic_padding=True , )
    config = ViTHybridConfig(backbone_config=backbone_config , image_size=384 , num_labels=1_000 )
    base_model = False
    # load original model from timm
    timm_model = timm.create_model(vit_name , pretrained=True )
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict )
    rename_keys = create_rename_keys(config , base_model )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model )
A_ : Any = """huggingface/label-files"""
A_ : Any = """imagenet-1k-id2label.json"""
A_ : Any = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type="""dataset""" ) , """r""" ) )
A_ : Optional[int] = {int(snake_case__ ): v for k, v in idalabel.items()}
A_ : Optional[int] = idalabel
A_ : Dict = {v: k for k, v in idalabel.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
A_ : int = ViTHybridModel(snake_case__ ).eval()
else:
A_ : Union[str, Any] = ViTHybridForImageClassification(snake_case__ ).eval()
model.load_state_dict(snake_case__ )
# create image processor
    transform = create_transform(**resolve_data_config({} , model=timm_model ) )
    timm_transforms = transform.transforms
    pillow_resamplings = {
        """bilinear""": PILImageResampling.BILINEAR,
        """bicubic""": PILImageResampling.BICUBIC,
        """nearest""": PILImageResampling.NEAREST,
    }
    processor = ViTHybridImageProcessor(
        do_resize=True , size={"""shortest_edge""": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=True , crop_size={"""height""": timm_transforms[1].size[0], """width""": timm_transforms[1].size[1]} , do_normalize=True , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
    image = prepare_img()
    timm_pixel_values = transform(image ).unsqueeze(0 )
    pixel_values = processor(image , return_tensors="""pt""" ).pixel_values
# verify pixel values
    assert torch.allclose(timm_pixel_values , pixel_values )
# verify logits
with torch.no_grad():
        outputs = model(pixel_values )
        logits = outputs.logits
print("""Predicted class:""" , logits.argmax(-1 ).item() )
if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values )
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output , outputs.pooler_output , atol=1E-3 )
    else:
        timm_logits = timm_model(pixel_values )
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits , outputs.logits , atol=1E-3 )
print("""Looks ok!""" )
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        print(F"""Saving model {vit_name} to {pytorch_dump_folder_path}""" )
        model.save_pretrained(pytorch_dump_folder_path )
        print(F"""Saving processor to {pytorch_dump_folder_path}""" )
        processor.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
print(F"""Pushing model and processor to the hub {vit_name}""" )
model.push_to_hub(F"""ybelkada/{vit_name}""" )
processor.push_to_hub(F"""ybelkada/{vit_name}""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--vit_name",
default="vit_base_r50_s16_384",
type=str,
help="Name of the hybrid ViT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 480
| 1
|
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_boto3_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_boto3_available():
    import boto3  # noqa: F401
def _create_iam_role_for_sagemaker(role_name ):
    """simple docstring"""
    iam_client = boto3.client("iam" )
    sagemaker_trust_policy = {
"Version": "2012-10-17",
"Statement": [
{"Effect": "Allow", "Principal": {"Service": "sagemaker.amazonaws.com"}, "Action": "sts:AssumeRole"}
],
}
try:
# create the role, associated with the chosen trust policy
        iam_client.create_role(
            RoleName=role_name , AssumeRolePolicyDocument=json.dumps(sagemaker_trust_policy , indent=2 ) )
        policy_document = {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"sagemaker:*",
"ecr:GetDownloadUrlForLayer",
"ecr:BatchGetImage",
"ecr:BatchCheckLayerAvailability",
"ecr:GetAuthorizationToken",
"cloudwatch:PutMetricData",
"cloudwatch:GetMetricData",
"cloudwatch:GetMetricStatistics",
"cloudwatch:ListMetrics",
"logs:CreateLogGroup",
"logs:CreateLogStream",
"logs:DescribeLogStreams",
"logs:PutLogEvents",
"logs:GetLogEvents",
"s3:CreateBucket",
"s3:ListBucket",
"s3:GetBucketLocation",
"s3:GetObject",
"s3:PutObject",
],
"Resource": "*",
}
],
}
# attach policy to role
        iam_client.put_role_policy(
            RoleName=role_name , PolicyName=F'{role_name}_policy_permission' , PolicyDocument=json.dumps(policy_document , indent=2 ) , )
except iam_client.exceptions.EntityAlreadyExistsException:
print(F'role {role_name} already exists. Using existing one' )
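    # Note: the trust policy only controls *who* may assume the role
    # (sagemaker.amazonaws.com); the permissions policy attached via
    # put_role_policy controls *what* the assumed role may do.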
def _get_iam_role_arn(role_name ):
    """simple docstring"""
    iam_client = boto3.client("iam" )
    return iam_client.get_role(RoleName=role_name )["Role"]["Arn"]
def snake_case__ ( ):
"""simple docstring"""
UpperCamelCase__ = _ask_options(
"How do you want to authorize?" , ["AWS Profile", "Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) "] , _snake_case , )
UpperCamelCase__ = None
if credentials_configuration == 0:
UpperCamelCase__ = _ask_field("Enter your AWS Profile name: [default] " , default="default" )
UpperCamelCase__ = aws_profile
else:
print(
"Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"
"`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`" )
UpperCamelCase__ = _ask_field("AWS Access Key ID: " )
UpperCamelCase__ = aws_access_key_id
UpperCamelCase__ = _ask_field("AWS Secret Access Key: " )
UpperCamelCase__ = aws_secret_access_key
UpperCamelCase__ = _ask_field("Enter your AWS Region: [us-east-1]" , default="us-east-1" )
UpperCamelCase__ = aws_region
UpperCamelCase__ = _ask_options(
"Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?" , ["Provide IAM Role name", "Create new IAM role using credentials"] , _snake_case , )
if role_management == 0:
UpperCamelCase__ = _ask_field("Enter your IAM role name: " )
else:
UpperCamelCase__ = "accelerate_sagemaker_execution_role"
print(F'Accelerate will create an iam role "{iam_role_name}" using the provided credentials' )
_create_iam_role_for_sagemaker(_snake_case )
UpperCamelCase__ = _ask_field(
"Do you want to use custom Docker image? [yes/NO]: " , _convert_yes_no_to_bool , default=_snake_case , error_message="Please enter yes or no." , )
UpperCamelCase__ = None
if is_custom_docker_image:
UpperCamelCase__ = _ask_field("Enter your Docker image: " , lambda _snake_case : str(_snake_case ).lower() )
UpperCamelCase__ = _ask_field(
"Do you want to provide SageMaker input channels with data locations? [yes/NO]: " , _convert_yes_no_to_bool , default=_snake_case , error_message="Please enter yes or no." , )
UpperCamelCase__ = None
if is_sagemaker_inputs_enabled:
UpperCamelCase__ = _ask_field(
"Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): " , lambda _snake_case : str(_snake_case ).lower() , )
UpperCamelCase__ = _ask_field(
"Do you want to enable SageMaker metrics? [yes/NO]: " , _convert_yes_no_to_bool , default=_snake_case , error_message="Please enter yes or no." , )
UpperCamelCase__ = None
if is_sagemaker_metrics_enabled:
UpperCamelCase__ = _ask_field(
"Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): " , lambda _snake_case : str(_snake_case ).lower() , )
UpperCamelCase__ = _ask_options(
"What is the distributed mode?" , ["No distributed training", "Data parallelism"] , _convert_sagemaker_distributed_mode , )
UpperCamelCase__ = {}
UpperCamelCase__ = _ask_field(
"Do you wish to optimize your script with torch dynamo?[yes/NO]:" , _convert_yes_no_to_bool , default=_snake_case , error_message="Please enter yes or no." , )
if use_dynamo:
UpperCamelCase__ = "dynamo_"
UpperCamelCase__ = _ask_options(
"Which dynamo backend would you like to use?" , [x.lower() for x in DYNAMO_BACKENDS] , _convert_dynamo_backend , default=2 , )
UpperCamelCase__ = _ask_field(
"Do you want to customize the defaults sent to torch.compile? [yes/NO]: " , _convert_yes_no_to_bool , default=_snake_case , error_message="Please enter yes or no." , )
if use_custom_options:
UpperCamelCase__ = _ask_options(
"Which mode do you want to use?" , _snake_case , lambda _snake_case : TORCH_DYNAMO_MODES[int(_snake_case )] , default="default" , )
UpperCamelCase__ = _ask_field(
"Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: " , _convert_yes_no_to_bool , default=_snake_case , error_message="Please enter yes or no." , )
UpperCamelCase__ = _ask_field(
"Do you want to enable dynamic shape tracing? [yes/NO]: " , _convert_yes_no_to_bool , default=_snake_case , error_message="Please enter yes or no." , )
UpperCamelCase__ = "Which EC2 instance type you want to use for your training?"
if distributed_type != SageMakerDistributedType.NO:
UpperCamelCase__ = _ask_options(
_snake_case , _snake_case , lambda _snake_case : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(_snake_case )] )
else:
eca_instance_query += "? [ml.p3.2xlarge]:"
UpperCamelCase__ = _ask_field(_snake_case , lambda _snake_case : str(_snake_case ).lower() , default="ml.p3.2xlarge" )
UpperCamelCase__ = 1
if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
UpperCamelCase__ = _ask_field(
"How many machines do you want use? [1]: " , _snake_case , default=1 , )
UpperCamelCase__ = _ask_options(
"Do you wish to use FP16 or BF16 (mixed precision)?" , ["no", "fp16", "bf16", "fp8"] , _convert_mixed_precision , )
if use_dynamo and mixed_precision == "no":
print(
"Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts." )
return SageMakerConfig(
        image_uri=_snake_case , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=_snake_case , use_cpu=_snake_case , dynamo_config=_snake_case , ec2_instance_type=ec2_instance_type , profile=_snake_case , region=_snake_case , iam_role_name=_snake_case , mixed_precision=_snake_case , num_machines=_snake_case , sagemaker_inputs_file=_snake_case , sagemaker_metrics_file=_snake_case , )
| 516
|
"""simple docstring"""
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('1.0.0a'):
raise Exception('requires fairseq >= 1.0.0a')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = 'Hello world! cécé herlolip'
def convert_xlm_roberta_xl_checkpoint_to_pytorch(roberta_checkpoint_path: str , pytorch_dump_folder_path: str , classification_head: bool ):
    """simple docstring"""
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path )
    roberta.eval()  # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    config = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_14 , type_vocab_size=1 , layer_norm_eps=1E-5 , )
    if classification_head:
        config.num_labels = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]
    print("Our RoBERTa config:" , config )
    model = XLMRobertaXLForSequenceClassification(config ) if classification_head else XLMRobertaXLForMaskedLM(config )
model.eval()
# Now let's copy all the weights.
# Embeddings
UpperCamelCase__ = roberta_sent_encoder.embed_tokens.weight
UpperCamelCase__ = roberta_sent_encoder.embed_positions.weight
UpperCamelCase__ = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
UpperCamelCase__ = roberta_sent_encoder.layer_norm.weight
UpperCamelCase__ = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
UpperCamelCase__ = model.roberta.encoder.layer[i]
UpperCamelCase__ = roberta_sent_encoder.layers[i]
UpperCamelCase__ = layer.attention
UpperCamelCase__ = roberta_layer.self_attn_layer_norm.weight
UpperCamelCase__ = roberta_layer.self_attn_layer_norm.bias
# self attention
UpperCamelCase__ = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
UpperCamelCase__ = roberta_layer.self_attn.q_proj.weight
UpperCamelCase__ = roberta_layer.self_attn.q_proj.bias
UpperCamelCase__ = roberta_layer.self_attn.k_proj.weight
UpperCamelCase__ = roberta_layer.self_attn.k_proj.bias
UpperCamelCase__ = roberta_layer.self_attn.v_proj.weight
UpperCamelCase__ = roberta_layer.self_attn.v_proj.bias
# self-attention output
UpperCamelCase__ = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
UpperCamelCase__ = roberta_layer.self_attn.out_proj.weight
UpperCamelCase__ = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
UpperCamelCase__ = roberta_layer.final_layer_norm.weight
UpperCamelCase__ = roberta_layer.final_layer_norm.bias
        # intermediate
        intermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias
        # output
        bert_output = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
# end of layer
if classification_head:
UpperCamelCase__ = roberta.model.classification_heads["mnli"].dense.weight
UpperCamelCase__ = roberta.model.classification_heads["mnli"].dense.bias
UpperCamelCase__ = roberta.model.classification_heads["mnli"].out_proj.weight
UpperCamelCase__ = roberta.model.classification_heads["mnli"].out_proj.bias
else:
# LM Head
UpperCamelCase__ = roberta.model.encoder.lm_head.dense.weight
UpperCamelCase__ = roberta.model.encoder.lm_head.dense.bias
UpperCamelCase__ = roberta.model.encoder.lm_head.layer_norm.weight
UpperCamelCase__ = roberta.model.encoder.lm_head.layer_norm.bias
UpperCamelCase__ = roberta.model.encoder.lm_head.weight
UpperCamelCase__ = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
    input_ids = roberta.encode(SAMPLE_TEXT ).unsqueeze(0 )  # batch of size 1
    our_output = model(input_ids )[0]
    if classification_head:
        their_output = roberta.model.classification_heads["mnli"](roberta.extract_features(input_ids ) )
    else:
        their_output = roberta.model(input_ids )[0]
    print(our_output.shape , their_output.shape )
    max_absolute_diff = torch.max(torch.abs(our_output - their_output ) ).item()
    print(F'max_absolute_diff = {max_absolute_diff}' )  # ~ 1e-7
    success = torch.allclose(our_output , their_output , atol=1E-3 )
    print("Do both models output the same tensors?" , "🔥" if success else "💩" )
    if not success:
        raise Exception("Something went wRoNg" )
    pathlib.Path(pytorch_dump_folder_path ).mkdir(parents=True , exist_ok=True )
    print(F'Saving model to {pytorch_dump_folder_path}' )
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--roberta_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--classification_head', action='store_true', help='Whether to convert a final classification head.'
)
    args = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 516
| 1
|
from math import factorial
def binomial_distribution(successes: int , trials: int , prob: float ) -> float:
    """simple docstring"""
    if successes > trials:
        raise ValueError("""successes must be lower or equal to trials""" )
    if trials < 0 or successes < 0:
        raise ValueError("""the function is defined for non-negative integers""" )
    if not isinstance(successes , int ) or not isinstance(trials , int ):
        raise ValueError("""the function is defined for non-negative integers""" )
    if not 0 < prob < 1:
        raise ValueError("""prob has to be in range of 1 - 0""" )
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials ) )
    coefficient /= factorial(successes ) * factorial(trials - successes )
    return probability * coefficient
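# Sanity check for the demo below: C(4, 2) * 0.75**2 * 0.25**2
# = 6 * 0.5625 * 0.0625 = 0.2109375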
if __name__ == "__main__":
from doctest import testmod
testmod()
print('''Probability of 2 successes out of 4 trails''')
print('''with probability of 0.75 is:''', end=''' ''')
print(binomial_distribution(2, 4, 0.75))
| 321
|
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(fnc: Callable[[int | float], int | float] , x_start: int | float , x_end: int | float , steps: int = 100 , ) -> float:
    """simple docstring"""
    x1 = x_start
    fx1 = fnc(x_start )
    area = 0.0
    for _ in range(steps ):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2 )
        area += abs(fx2 + fx1 ) * (x2 - x1 ) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area
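# Note: abs(fx2 + fx1) makes segments lying below the x-axis contribute
# positively, so the estimate tracks the unsigned area between the curve and
# the axis rather than the signed integral.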
if __name__ == "__main__":
    def f(x: float ) -> float:
        """simple docstring"""
        return x**3 + x**2
print('''f(x) = x^3 + x^2''')
print('''The area between the curve, x = -5, x = 5 and the x axis is:''')
    i = 10
while i <= 100000:
print(f"""with {i} steps: {trapezoidal_area(f, -5, 5, i)}""")
i *= 10
| 321
| 1
|
def remove_digit(num: int ) -> int:
    if not isinstance(num , int ):
        raise TypeError("only integers accepted as input" )
    else:
        num_str = str(abs(num ) )
        num_transpositions = [list(num_str ) for _ in range(len(num_str ) )]
        for index in range(len(num_str ) ):
            num_transpositions[index].pop(index )
    return max(
        int("".join(list(transposition ) ) ) for transposition in num_transpositions )
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 413
|
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError("""To use the rich extension, install rich with `pip install rich`""")
| 413
| 1
|
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
T = TypeVar('T')
class UpperCamelCase__ ( Generic[T] ):
    def __init__( self , directed: bool = True ):
        '''simple docstring'''
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed
    def add_edge( self , source_vertex: T , destination_vertex: T ):
        '''simple docstring'''
        if not self.directed:  # For undirected graphs
            # if both source vertex and destination vertex are both present in the
            # adjacency list, add destination vertex to source vertex list of adjacent
            # vertices and add source vertex to destination vertex list of adjacent
            # vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
                self.adj_list[destination_vertex].append(source_vertex )
            # if only source vertex is present in adjacency list, add destination vertex
            # to source vertex list of adjacent vertices, then create a new vertex with
            # destination vertex as key and assign a list containing the source vertex
            # as it's first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
                self.adj_list[destination_vertex] = [source_vertex]
            # if only destination vertex is present in adjacency list, add source vertex
            # to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the source vertex
            # as it's first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex )
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and assign a list
            # containing the destination vertex as it's first adjacent vertex also
            # create a new vertex with destination vertex as key and assign a list
            # containing the source vertex as it's first adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # if both source vertex and destination vertex are present in adjacency
            # list, add destination vertex to source vertex list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
            # if only source vertex is present in adjacency list, add destination
            # vertex to source vertex list of adjacent vertices and create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
                self.adj_list[destination_vertex] = []
            # if only destination vertex is present in adjacency list, create a new
            # vertex with source vertex as key and assign a list containing destination
            # vertex as first adjacent vertex
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and a list containing
            # destination vertex as it's first adjacent vertex. Then create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []
        return self
def __repr__( self : Optional[int] ):
'''simple docstring'''
return pformat(self.adj_list )
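# A minimal usage sketch, assuming the class above is exposed as
# GraphAdjacencyList (the placeholder class name is a dataset artifact):
#   graph = GraphAdjacencyList[int](directed=False)
#   graph.add_edge(1, 2).add_edge(2, 3)  # add_edge returns self, so calls chain
#   print(graph)  # {1: [2], 2: [1, 3], 3: [2]}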
| 717
|
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser(subparsers=None ):
    if subparsers is not None:
        parser = subparsers.add_parser("""env""" )
    else:
        parser = argparse.ArgumentParser("""Accelerate env command""" )
    parser.add_argument(
        """--config_file""" , default=None , help="""The config file to use for the default values in the launching script.""" )
    if subparsers is not None:
        parser.set_defaults(func=env_command )
    return parser
def env_command(args ):
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()
    accelerate_config = """Not found"""
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file ):
        accelerate_config = load_config_from_file(args.config_file ).to_dict()
    info = {
        """`Accelerate` version""": version,
        """Platform""": platform.platform(),
        """Python version""": platform.python_version(),
        """Numpy version""": np.__version__,
        """PyTorch version (GPU?)""": F'''{pt_version} ({pt_cuda_available})''',
        """PyTorch XPU available""": str(pt_xpu_available ),
        """PyTorch NPU available""": str(pt_npu_available ),
        """System RAM""": F'''{psutil.virtual_memory().total / 1_0_2_4 ** 3:.2f} GB''',
    }
    if pt_cuda_available:
        info["""GPU type"""] = torch.cuda.get_device_name()
    print("""\nCopy-and-paste the text below in your GitHub issue\n""" )
    print("""\n""".join([F'''- {prop}: {val}''' for prop, val in info.items()] ) )
    print("""- `Accelerate` default config:""" if args.config_file is None else """- `Accelerate` config passed:""" )
    accelerate_config_str = (
        """\n""".join([F'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] )
        if isinstance(accelerate_config , dict )
        else F'''\t{accelerate_config}'''
    )
    print(accelerate_config_str )
    info["""`Accelerate` configs"""] = accelerate_config
    return info
def main():
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args )
    return 0
if __name__ == "__main__":
raise SystemExit(main())
| 650
| 0
|
'''simple docstring'''
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class FlaxViTModelTester( unittest.TestCase ):
'''simple docstring'''
    def __init__( self , parent , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        self.num_patches = (image_size // patch_size) ** 2
        self.seq_length = self.num_patches + 1
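        # e.g. with the defaults above: (30 // 2) ** 2 = 225 patches, +1 for [CLS] = 226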
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        config = ViTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , )
        return config, pixel_values
    def create_and_check_model( self , config , pixel_values ):
        model = FlaxViTModel(config=config )
        result = model(pixel_values )
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) )
    def create_and_check_for_image_classification( self , config , pixel_values ):
        config.num_labels = self.type_sequence_label_size
        model = FlaxViTForImageClassification(config=config )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = FlaxViTForImageClassification(config )
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxViTModelTest( FlaxModelTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
    def setUp( self ) -> None:
        self.model_tester = FlaxViTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ViTConfig , has_text_modality=False , hidden_size=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_image_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    def test_forward_signature( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.__call__ )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_jit_compilation( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
                model = model_class(config )

                @jax.jit
                def model_jitted(pixel_values , **kwargs ):
                    return model(pixel_values=pixel_values , **kwargs )

                with self.subTest("""JIT Enabled""" ):
                    jitted_outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
                with self.subTest("""JIT Disabled""" ):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(jitted_outputs ) , len(outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
    @slow
    def test_model_from_pretrained( self ):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("""google/vit-base-patch16-224""" )
            outputs = model(np.ones((1, 3, 224, 224) ) )
            self.assertIsNotNone(outputs )
| 433
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    'configuration_swinv2': ['SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Swinv2Config'],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_swinv2'] = [
        'SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST',
        'Swinv2ForImageClassification',
        'Swinv2ForMaskedImageModeling',
        'Swinv2Model',
        'Swinv2PreTrainedModel',
    ]
if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 25
| 0
|
'''simple docstring'''
def mf_knapsack(i , wt , val , j ):
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            new_val = mf_knapsack(i - 1 , wt , val , j )
        else:
            new_val = max(
                mf_knapsack(i - 1 , wt , val , j ) , mf_knapsack(i - 1 , wt , val , j - wt[i - 1] ) + val[i - 1] , )
        f[i][j] = new_val
    return f[i][j]
def knapsack(w , wt , val , n ):
    dp = [[0] * (w + 1) for _ in range(n + 1 )]
    for i in range(1 , n + 1 ):
        for w_ in range(1 , w + 1 ):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]] , dp[i - 1][w_] )
            else:
                dp[i][w_] = dp[i - 1][w_]
    return dp[n][w], dp
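# The recurrence: dp[i][w_] is the best value achievable with the first i items
# and capacity w_; either skip item i (dp[i - 1][w_]) or take it when it fits
# (val[i - 1] + dp[i - 1][w_ - wt[i - 1]]).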
def knapsack_with_example_solution(w: int , wt: list , val: list ):
    if not (isinstance(wt , (list, tuple) ) and isinstance(val , (list, tuple) )):
        raise ValueError(
            """Both the weights and values vectors must be either lists or tuples""" )
    num_items = len(wt )
    if num_items != len(val ):
        msg = (
            """The number of weights must be the same as the number of values.\n"""
            F"""But got {num_items} weights and {len(val )} values"""
        )
        raise ValueError(msg )
    for i in range(num_items ):
        if not isinstance(wt[i] , int ):
            msg = (
                """All weights must be integers but got weight of """
                F"""type {type(wt[i] )} at index {i}"""
            )
            raise TypeError(msg )
    optimal_val , dp_table = knapsack(w , wt , val , num_items )
    example_optional_set: set = set()
    _construct_solution(dp_table , wt , num_items , w , example_optional_set )
    return optimal_val, example_optional_set
def _construct_solution(dp: list , wt: list , i: int , j: int , optimal_set: set ):
    # for the current item i at a maximum weight j to be part of an optimal subset,
    # the optimal value at (i, j) must be greater than the optimal value at (i-1, j).
    # where i - 1 means considering only the previous items at the given maximum weight
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp , wt , i - 1 , j , optimal_set )
        else:
            optimal_set.add(i )
            _construct_solution(dp , wt , i - 1 , j - wt[i - 1] , optimal_set )
if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution , _ = knapsack(w, wt, val, n)
print(optimal_solution)
print(mf_knapsack(n, wt, val, w)) # switched the n and w
# testing the dynamic programming problem with example
# the optimal subset for the above example are items 3 and 4
    optimal_solution , optimal_subset = knapsack_with_example_solution(w, wt, val)
assert optimal_solution == 8
assert optimal_subset == {3, 4}
print('optimal_value = ', optimal_solution)
print('An optimal subset corresponding to the optimal value', optimal_subset)
| 713
|
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class SCREAMING_SNAKE_CASE__ ( DiffusionPipeline ):
    def __init__( self , vae , text_encoder , tokenizer , unet , scheduler , safety_checker , feature_extractor , ):
'''simple docstring'''
super().__init__()
        self.register_modules(
            vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler , safety_checker=safety_checker , feature_extractor=feature_extractor , )
def __lowerCamelCase ( self , __UpperCamelCase = "auto" ):
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__a : Union[str, Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__UpperCamelCase )
    def disable_attention_slicing( self ):
        '''simple docstring'''
        self.enable_attention_slicing(None )
@torch.no_grad()
    def __call__( self , prompt , height = 512 , width = 512 , num_inference_steps = 50 , guidance_scale = 7.5 , negative_prompt = None , num_images_per_prompt = 1 , eta = 0.0 , generator = None , latents = None , output_type = "pil" , return_dict = True , callback = None , callback_steps = 1 , text_embeddings = None , **kwargs , ):
'''simple docstring'''
        if isinstance(prompt , str ):
            batch_size = 1
        elif isinstance(prompt , list ):
            batch_size = len(prompt )
        else:
            raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(prompt )}""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps , int ) or callback_steps <= 0)
        ):
            raise ValueError(
                f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
                f""" {type(callback_steps )}.""" )
# get prompt text embeddings
__a : Tuple = self.tokenizer(
__UpperCamelCase , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
__a : Union[str, Any] = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__a : str = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
__a : Optional[int] = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
__a : int = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
__a , __a , __a : Union[str, Any] = text_embeddings.shape
__a : Optional[Any] = text_embeddings.repeat(1 , __UpperCamelCase , 1 )
__a : Union[str, Any] = text_embeddings.view(bs_embed * num_images_per_prompt , __UpperCamelCase , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__a : Any = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__a : List[str]
if negative_prompt is None:
__a : Optional[Any] = [""""""]
elif type(__UpperCamelCase ) is not type(__UpperCamelCase ):
raise TypeError(
f"""`negative_prompt` should be the same type to `prompt`, but got {type(__UpperCamelCase )} !="""
f""" {type(__UpperCamelCase )}.""" )
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
__a : Any = [negative_prompt]
elif batch_size != len(__UpperCamelCase ):
raise ValueError(
f"""`negative_prompt`: {negative_prompt} has batch size {len(__UpperCamelCase )}, but `prompt`:"""
f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
""" the batch size of `prompt`.""" )
else:
__a : Tuple = negative_prompt
__a : Any = text_input_ids.shape[-1]
__a : List[str] = self.tokenizer(
__UpperCamelCase , padding="""max_length""" , max_length=__UpperCamelCase , truncation=__UpperCamelCase , return_tensors="""pt""" , )
__a : str = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__a : List[str] = uncond_embeddings.shape[1]
__a : List[Any] = uncond_embeddings.repeat(__UpperCamelCase , __UpperCamelCase , 1 )
__a : Tuple = uncond_embeddings.view(batch_size * num_images_per_prompt , __UpperCamelCase , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__a : List[Any] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__a : Tuple = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
__a : List[Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
__a : int = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
__a : Any = torch.randn(
__UpperCamelCase , generator=__UpperCamelCase , device="""cpu""" , dtype=__UpperCamelCase ).to(self.device )
__a : Optional[Any] = torch.randn(__UpperCamelCase , generator=__UpperCamelCase , device="""cpu""" , dtype=__UpperCamelCase ).to(
self.device )
else:
__a : Optional[int] = torch.randn(
__UpperCamelCase , generator=__UpperCamelCase , device=self.device , dtype=__UpperCamelCase )
__a : str = torch.randn(__UpperCamelCase , generator=__UpperCamelCase , device=self.device , dtype=__UpperCamelCase )
else:
if latents_reference.shape != latents_shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
__a : Optional[Any] = latents_reference.to(self.device )
__a : str = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
__a : List[str] = (latents_shape[3] - latents_shape_reference[3]) // 2
__a : int = (latents_shape[2] - latents_shape_reference[2]) // 2
__a : int = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
__a : Tuple = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
__a : Optional[Any] = 0 if dx < 0 else dx
__a : Optional[Any] = 0 if dy < 0 else dy
__a : Optional[int] = max(-dx , 0 )
__a : Optional[Any] = max(-dy , 0 )
# import pdb
# pdb.set_trace()
__a : Optional[int] = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(__UpperCamelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
__a : Dict = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__a : Any = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__a : List[Any] = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__a : Optional[Any] = {}
if accepts_eta:
__a : Union[str, Any] = eta
for i, t in enumerate(self.progress_bar(__UpperCamelCase ) ):
# expand the latents if we are doing classifier free guidance
__a : List[str] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__a : Tuple = self.scheduler.scale_model_input(__UpperCamelCase , __UpperCamelCase )
# predict the noise residual
__a : Union[str, Any] = self.unet(__UpperCamelCase , __UpperCamelCase , encoder_hidden_states=__UpperCamelCase ).sample
# perform guidance
if do_classifier_free_guidance:
__a , __a : List[str] = noise_pred.chunk(2 )
__a : Optional[int] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
__a : List[Any] = self.scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
__a : Optional[Any] = 1 / 0.1_8_2_1_5 * latents
__a : Optional[int] = self.vae.decode(__UpperCamelCase ).sample
__a : List[str] = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__a : int = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
__a : List[str] = self.feature_extractor(self.numpy_to_pil(__UpperCamelCase ) , return_tensors="""pt""" ).to(
self.device )
__a , __a : int = self.safety_checker(
images=__UpperCamelCase , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
__a : Optional[int] = None
if output_type == "pil":
__a : str = self.numpy_to_pil(__UpperCamelCase )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=__UpperCamelCase , nsfw_content_detected=__UpperCamelCase )
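
# A minimal sketch of the classifier-free guidance step used in the denoising
# loop above (illustrative shapes only; guidance_scale > 1 strengthens prompt
# adherence):
_noise_pred = torch.randn(2, 4, 64, 64)  # [uncond, text] stacked on the batch dim
_uncond, _text = _noise_pred.chunk(2)
_guided = _uncond + 7.5 * (_text - _uncond)
assert _guided.shape == (1, 4, 64, 64)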
| 697
| 0
|
"""simple docstring"""
def solution(n: int = 4_000_000) -> int:
    """Return the sum of all even Fibonacci numbers that do not exceed n."""
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)
if __name__ == "__main__":
print(F"{solution() = }")
| 200
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {'vocab_file': 'sentencepiece.bpe.model'}
__snake_case = {
'vocab_file': {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model',
}
}
__snake_case = {
'camembert-base': 512,
}
__snake_case = '▁'
class CamembertTokenizer(PreTrainedTokenizer):
    """CamemBERT tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
        sp_model_kwargs=None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>)
        self.fairseq_tokens_to_ids = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}

        self.fairseq_offset = len(self.fairseq_tokens_to_ids)
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self) -> int:
        return len(self.fairseq_tokens_to_ids) + len(self.sp_model)

    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def _tokenize(self, text) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token) -> int:
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        elif self.sp_model.PieceToId(token) == 0:
            # Convert sentence piece unk token to fairseq unk token index
            return self.unk_token_id
        return self.fairseq_offset + self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index) -> str:
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens) -> str:
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
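
# A toy sketch of the fairseq-offset id mapping implemented above (hypothetical
# piece-to-id callback; real ids come from the loaded SentencePiece model):
_fairseq_tokens_to_ids = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
_fairseq_offset = len(_fairseq_tokens_to_ids)  # 4


def _token_to_id_sketch(token, sp_piece_to_id):
    if token in _fairseq_tokens_to_ids:
        return _fairseq_tokens_to_ids[token]
    sp_id = sp_piece_to_id(token)
    if sp_id == 0:  # sentencepiece unk maps to the fairseq unk index
        return _fairseq_tokens_to_ids["<unk>"]
    return _fairseq_offset + sp_id


assert _token_to_id_sketch("<pad>", lambda t: 0) == 1
assert _token_to_id_sketch("▁hello", lambda t: 10) == 14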
| 200
| 1
|
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class InputExample:
    """A single training/test example for the HANS dataset."""

    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None


@dataclass(frozen=True)
class InputFeatures:
    """A single set of features of data."""

    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class HansDataset(Dataset):
    """PyTorch dataset of cached HANS features."""

    features: List[InputFeatures]

    def __init__(
        self,
        data_dir: str,
        tokenizer: PreTrainedTokenizer,
        task: str,
        max_seq_length: Optional[int] = None,
        overwrite_cache: bool = False,
        evaluate: bool = False,
    ):
        processor = hans_processors[task]()
        cached_features_file = os.path.join(
            data_dir,
            "cached_{}_{}_{}_{}".format(
                "dev" if evaluate else "train",
                tokenizer.__class__.__name__,
                str(max_seq_length),
                task,
            ),
        )
        label_list = processor.get_labels()
        if tokenizer.__class__ in (
            RobertaTokenizer,
            RobertaTokenizerFast,
            XLMRobertaTokenizer,
            BartTokenizer,
            BartTokenizerFast,
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not overwrite_cache:
                logger.info(f"Loading features from cached file {cached_features_file}")
                self.features = torch.load(cached_features_file)
            else:
                logger.info(f"Creating features from dataset file at {data_dir}")
                examples = (
                    processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
                )
                logger.info("Training examples: %s", len(examples))
                self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(self.features, cached_features_file)
    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]

    def get_labels(self):
        return self.label_list
if is_tf_available():
import tensorflow as tf
class TFHansDataset:
    """TensorFlow counterpart of HansDataset, exposed through tf.data."""

    features: List[InputFeatures]

    def __init__(
        self,
        data_dir: str,
        tokenizer: PreTrainedTokenizer,
        task: str,
        max_seq_length: Optional[int] = 128,
        overwrite_cache: bool = False,
        evaluate: bool = False,
    ):
        processor = hans_processors[task]()
        label_list = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list

        examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
        self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='''convert examples to features''' ):
if ex_index % 1_00_00 == 0:
                    logger.info("Writing example %d of %d" % (ex_index, len(self.features)))
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
        self.dataset = tf.data.Dataset.from_generator(
            gen,
            (
                {
                    "example_id": tf.int32,
                    "input_ids": tf.int32,
                    "attention_mask": tf.int32,
                    "token_type_ids": tf.int32,
                },
                tf.int64,
            ),
{
'''example_id''': tf.TensorShape([] ),
'''input_ids''': tf.TensorShape([None, None] ),
'''attention_mask''': tf.TensorShape([None, None] ),
'''token_type_ids''': tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
    def get_dataset(self):
        return self.dataset

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]

    def get_labels(self):
        return self.label_list
class HansProcessor(DataProcessor):
    """Processor for the HANS data set."""

    def get_train_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")

    def get_dev_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")

    def get_labels(self):
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            label = line[7][2:] if line[7].startswith("ex") else line[7]
            pairID = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples
def hans_convert_examples_to_features(examples, label_list, max_length, tokenizer):
    """Convert HANS examples into model features."""
    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10_000 == 0:
            logger.info("Writing example %d" % (ex_index))

        inputs = tokenizer(
            example.text_a,
            example.text_b,
            add_special_tokens=True,
            max_length=max_length,
            padding="max_length",
            truncation=True,
            return_overflowing_tokens=True,
        )

        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID)
        features.append(InputFeatures(**inputs, label=label, pairID=pairID))

    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"guid: {example}")
        logger.info(f"features: {features[i]}")

    return features
hans_tasks_num_labels = {
    "hans": 3,
}

hans_processors = {
    "hans": HansProcessor,
}
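
# Minimal illustration of the RoBERTa label-index swap applied in the datasets
# above: the pretrained MNLI heads order the labels differently, so indices 1
# and 2 are exchanged before features are built.
_label_list = ["contradiction", "entailment", "neutral"]
_label_list[1], _label_list[2] = _label_list[2], _label_list[1]
assert _label_list == ["contradiction", "neutral", "entailment"]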
| 62
|
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class BlenderbotSmallTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False
    def setUp(self):
super().setUp()
_A = ['''__start__''', '''adapt''', '''act''', '''ap@@''', '''te''', '''__end__''', '''__unk__''']
_A = dict(zip(__A , range(len(__A ) ) ) )
_A = ['''#version: 0.2''', '''a p''', '''t e</w>''', '''ap t</w>''', '''a d''', '''ad apt</w>''', '''a c''', '''ac t</w>''', '''''']
_A = {'''unk_token''': '''__unk__''', '''bos_token''': '''__start__''', '''eos_token''': '''__end__'''}
_A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__A ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__A ) )
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "adapt act apte"
        output_text = "adapt act apte"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt act apte"
        bpe_tokens = ["adapt", "act", "ap@@", "te"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_special_tokens_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        assert tok("sam").input_ids == [1384]
        src_text = "I am a small frog."
        encoded = tok([src_text], padding=False, truncation=True)["input_ids"]
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."
    def test_empty_word_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        src_text = "I am a small frog ."
        src_text_dot = "."
        encoded = tok(src_text)["input_ids"]
        encoded_dot = tok(src_text_dot)["input_ids"]
        assert encoded[-1] == encoded_dot[0]
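
# A toy greedy-BPE walk-through matching the fixture above (simplified sketch:
# no caching, "</w>" marks the end of a word as in the merges file):
def _bpe_sketch(word, merges):
    ranks = {pair: i for i, pair in enumerate(merges)}
    symbols = list(word[:-1]) + [word[-1] + "</w>"]
    while len(symbols) > 1:
        pairs = [(symbols[i], symbols[i + 1]) for i in range(len(symbols) - 1)]
        best = min(pairs, key=lambda p: ranks.get(p, float("inf")))
        if best not in ranks:
            break
        i = pairs.index(best)
        symbols[i : i + 2] = [best[0] + best[1]]
    return symbols


# "apte" becomes "ap@@ te" once the trailing "</w>" is rendered as a word boundary
assert _bpe_sketch("apte", [("a", "p"), ("t", "e</w>"), ("ap", "t</w>")]) == ["ap", "te</w>"]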
| 62
| 1
|
import os
import pytest
from attr import dataclass
__lowercase : Optional[int] = '''us-east-1''' # defaults region
@dataclass
class SageMakerTestEnvironment:
    framework: str
    role = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
    hyperparameters = {
'''task_name''': '''mnli''',
'''per_device_train_batch_size''': 1_6,
'''per_device_eval_batch_size''': 1_6,
'''do_train''': True,
'''do_eval''': True,
'''do_predict''': True,
'''output_dir''': '''/opt/ml/model''',
'''overwrite_output_dir''': True,
'''max_steps''': 5_0_0,
'''save_steps''': 5_5_0_0,
}
    distributed_hyperparameters = {**hyperparameters, "max_steps": 1000}
@property
    def metric_definitions(self):
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
    def base_job_name(self):
        return f"{self.framework}-transformers-test"
@property
    def test_path(self):
        return f"./tests/sagemaker/scripts/{self.framework}"
@property
    def image_uri(self):
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope="""class""" )
def sm_env(request):
    """Class-scoped fixture attaching the test environment to the test class."""
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework)
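
# A quick self-check of the metric-regex shape used above (illustrative log line):
import re

_match = re.search(r"train_runtime.*=\D*(.*?)$", "train_runtime = 123.45")
assert _match is not None and _match.group(1) == "123.45"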
| 36
|
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import Swin2SRConfig, Swin2SRForImageSuperResolution, Swin2SRImageProcessor
def get_config(checkpoint_url):
    config = Swin2SRConfig()

    if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        config.upscale = 4
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        config.upscale = 4
        config.image_size = 48
        config.upsampler = "pixelshuffle_aux"
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        config.depths = [6, 6, 6, 6]
        config.embed_dim = 60
        config.num_heads = [6, 6, 6, 6]
        config.upsampler = "pixelshuffledirect"
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        config.upscale = 4
        config.upsampler = "nearest+conv"
    elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
        config.num_channels = 1
        config.upscale = 1
        config.image_size = 126
        config.window_size = 7
        config.img_range = 255.0
        config.upsampler = ""

    return config
def rename_key(name, config):
    if "patch_embed.proj" in name and "layers" not in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.patch_embeddings.layernorm")
    if "layers" in name:
        name = name.replace("layers", "encoder.stages")
    if "residual_group.blocks" in name:
        name = name.replace("residual_group.blocks", "layers")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "patch_embed.projection")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "conv_first" in name:
        name = name.replace("conv_first", "first_convolution")

    if (
        "upsample" in name
        or "conv_before_upsample" in name
        or "conv_bicubic" in name
        or "conv_up" in name
        or "conv_hr" in name
        or "conv_last" in name
        or "aux" in name
    ):
        # heads
        if "conv_last" in name:
            name = name.replace("conv_last", "final_convolution")
        if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
            if "conv_before_upsample.0" in name:
                name = name.replace("conv_before_upsample.0", "conv_before_upsample")
            if "upsample.0" in name:
                name = name.replace("upsample.0", "upsample.convolution_0")
            if "upsample.2" in name:
                name = name.replace("upsample.2", "upsample.convolution_1")
            name = "upsample." + name
        elif config.upsampler == "pixelshuffledirect":
            name = name.replace("upsample.0.weight", "upsample.conv.weight")
            name = name.replace("upsample.0.bias", "upsample.conv.bias")
        else:
            pass
    else:
        name = "swin2sr." + name

    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            stage_num = int(key_split[1])
            block_num = int(key_split[4])
            dim = config.embed_dim

            if "weight" in key:
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, config)] = val

    return orig_state_dict
def convert_swinasr_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    config = get_config(checkpoint_url)
    model = Swin2SRForImageSuperResolution(config)
    model.eval()

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)

    if len(missing_keys) > 0:
        raise ValueError("Missing keys when converting: {}".format(missing_keys))
    for key in unexpected_keys:
        if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
            raise ValueError(f"Unexpected key {key} in state_dict")

    # verify values
    url = "https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    processor = Swin2SRImageProcessor()
    # pixel_values = processor(image, return_tensors="pt").pixel_values

    image_size = 126 if "Jpeg" in checkpoint_url else 256
    transforms = Compose(
        [
            Resize((image_size, image_size)),
            ToTensor(),
            Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    pixel_values = transforms(image).unsqueeze(0)

    if config.num_channels == 1:
        pixel_values = pixel_values[:, 0, :, :].unsqueeze(1)

    outputs = model(pixel_values)
# assert values
if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
a__ : str =torch.Size([1, 3, 512, 512] )
a__ : List[str] =torch.tensor(
[[-0.7_0_8_7, -0.7_1_3_8, -0.6_7_2_1], [-0.8_3_4_0, -0.8_0_9_5, -0.7_2_9_8], [-0.9_1_4_9, -0.8_4_1_4, -0.7_9_4_0]] )
elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
a__ : List[Any] =torch.Size([1, 3, 1_024, 1_024] )
a__ : List[str] =torch.tensor(
[[-0.7_7_7_5, -0.8_1_0_5, -0.8_9_3_3], [-0.7_7_6_4, -0.8_3_5_6, -0.9_2_2_5], [-0.7_9_7_6, -0.8_6_8_6, -0.9_5_7_9]] )
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
# TODO values didn't match exactly here
a__ : Tuple =torch.Size([1, 3, 1_024, 1_024] )
a__ : Optional[int] =torch.tensor(
[[-0.8_0_3_5, -0.7_5_0_4, -0.7_4_9_1], [-0.8_5_3_8, -0.8_1_2_4, -0.7_7_8_2], [-0.8_8_0_4, -0.8_6_5_1, -0.8_4_9_3]] )
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
a__ : Tuple =torch.Size([1, 3, 512, 512] )
a__ : str =torch.tensor(
[[-0.7_6_6_9, -0.8_6_6_2, -0.8_7_6_7], [-0.8_8_1_0, -0.9_9_6_2, -0.9_8_2_0], [-0.9_3_4_0, -1.0_3_2_2, -1.1_1_4_9]] )
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
a__ : Optional[int] =torch.Size([1, 3, 1_024, 1_024] )
a__ : Optional[Any] =torch.tensor(
[[-0.5_2_3_8, -0.5_5_5_7, -0.6_3_2_1], [-0.6_0_1_6, -0.5_9_0_3, -0.6_3_9_1], [-0.6_2_4_4, -0.6_3_3_4, -0.6_8_8_9]] )
    assert (
        outputs.reconstruction.shape == expected_shape
    ), f"Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"
    assert torch.allclose(outputs.reconstruction[0, 0, :3, :3], expected_slice, atol=1e-3)
print("Looks ok!" )
    url_to_name = {
"https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth": (
"swin2SR-classical-sr-x2-64"
),
"https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth": (
"swin2SR-classical-sr-x4-64"
),
"https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth": (
"swin2SR-compressed-sr-x4-48"
),
"https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth": (
"swin2SR-lightweight-x2-64"
),
"https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth": (
"swin2SR-realworld-sr-x4-64-bsrgan-psnr"
),
}
    model_name = url_to_name[checkpoint_url]
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(pytorch_dump_folder_path)
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
        processor.save_pretrained(pytorch_dump_folder_path)
if push_to_hub:
model.push_to_hub(f'''caidas/{model_name}''' )
processor.push_to_hub(f'''caidas/{model_name}''' )
if __name__ == "__main__":
UpperCAmelCase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth""",
type=str,
help="""URL of the original Swin2SR checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Whether to push the converted model to the hub.""")
UpperCAmelCase : Optional[Any] = parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
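
# Minimal sketch of the fused-qkv split performed in `convert_state_dict` above
# (illustrative sizes; `_dim` stands in for the attention embedding dimension):
_dim = 8
_qkv_weight = torch.randn(3 * _dim, _dim)
_q, _k, _v = _qkv_weight[:_dim, :], _qkv_weight[_dim : 2 * _dim, :], _qkv_weight[-_dim:, :]
assert _q.shape == _k.shape == _v.shape == (_dim, _dim)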
| 563
| 0
|
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class FeatureExtractionSavingTestMixin:
    feature_extraction_class = None

    def test_feat_extract_to_json_string(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        obj = json.loads(feat_extract.to_json_string())
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key], value)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_init_without_params(self):
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract)
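
# The JSON round-trip the tests above rely on, in miniature (illustrative values):
_config = {"feature_size": 80, "sampling_rate": 16_000}
assert json.loads(json.dumps(_config)) == _config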
| 700
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json',
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class GPTNeoXConfig(PretrainedConfig):
    model_type = "gpt_neox"

    def __init__(
        self,
        vocab_size=50_432,
        hidden_size=6_144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24_576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=10_000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2_048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!"
            )

    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
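
# Example usage (hypothetical values): linear RoPE scaling stretches positions
# by `factor` relative to the pretraining context window.
_config = GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})
assert _config.rope_scaling["factor"] == 2.0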
| 387
| 0
|
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import Conv1D
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
logger = logging.get_logger(__name__)
def set_module_quantized_tensor_to_device(module, tensor_name, device, value=None, fp16_statistics=None):
    # Recurse if needed
    if "." in tensor_name:
        splits = tensor_name.split(".")
        for split in splits[:-1]:
            new_module = getattr(module, split)
            if new_module is None:
                raise ValueError(f"{module} has no attribute {split}.")
            module = new_module
        tensor_name = splits[-1]

    if tensor_name not in module._parameters and tensor_name not in module._buffers:
        raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.")
    is_buffer = tensor_name in module._buffers
    old_value = getattr(module, tensor_name)

    if old_value.device == torch.device("meta") and device not in ["meta", torch.device("meta")] and value is None:
        raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {device}.")

    is_4bit = False
    is_8bit = False
    if is_buffer or not is_bitsandbytes_available():
        is_8bit = False
        is_4bit = False
    else:
        is_4bit = hasattr(bnb.nn, "Params4bit") and isinstance(module._parameters[tensor_name], bnb.nn.Params4bit)
        is_8bit = isinstance(module._parameters[tensor_name], bnb.nn.Int8Params)

    if is_8bit or is_4bit:
        param = module._parameters[tensor_name]
        if param.device.type != "cuda":
            if value is None:
                new_value = old_value.to(device)
            elif isinstance(value, torch.Tensor):
                new_value = value.to("cpu")
                if value.dtype == torch.int8:
                    is_8bit_serializable = version.parse(importlib.metadata.version("bitsandbytes")) > version.parse(
                        "0.37.2"
                    )
                    if not is_8bit_serializable:
                        raise ValueError(
                            "Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. "
                            "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`."
                        )
            else:
                new_value = torch.tensor(value, device="cpu")

            # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
            # Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls, Conv1D) and fp16_statistics is None:
                new_value = new_value.T

            kwargs = old_value.__dict__
            if is_8bit:
                new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs).to(device)
            elif is_4bit:
                new_value = bnb.nn.Params4bit(new_value, requires_grad=False, **kwargs).to(device)

            module._parameters[tensor_name] = new_value
            if fp16_statistics is not None:
                setattr(module.weight, "SCB", fp16_statistics.to(device))
    else:
        if value is None:
            new_value = old_value.to(device)
        elif isinstance(value, torch.Tensor):
            new_value = value.to(device)
        else:
            new_value = torch.tensor(value, device=device)

        if is_buffer:
            module._buffers[tensor_name] = new_value
        else:
            new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)
            module._parameters[tensor_name] = new_value
def _replace_with_bnb_linear(
    model, modules_to_not_convert=None, current_key_name=None, quantization_config=None, has_been_replaced=False
):
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)

        if (isinstance(module, nn.Linear) or isinstance(module, Conv1D)) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in ".".join(current_key_name) for key in modules_to_not_convert):
                with init_empty_weights():
                    if isinstance(module, Conv1D):
                        in_features, out_features = module.weight.shape
                    else:
                        in_features = module.in_features
                        out_features = module.out_features

                    if quantization_config.quantization_method() == "llm_int8":
                        model._modules[name] = bnb.nn.Linear8bitLt(
                            in_features,
                            out_features,
                            module.bias is not None,
                            has_fp16_weights=quantization_config.llm_int8_has_fp16_weight,
                            threshold=quantization_config.llm_int8_threshold,
                        )
                        has_been_replaced = True
                    else:
                        if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
                        ):
                            pass
                        else:
                            model._modules[name] = bnb.nn.Linear4bit(
                                in_features,
                                out_features,
                                module.bias is not None,
                                quantization_config.bnb_4bit_compute_dtype,
                                compress_statistics=quantization_config.bnb_4bit_use_double_quant,
                                quant_type=quantization_config.bnb_4bit_quant_type,
                            )
                            has_been_replaced = True
                    # Store the module class in case we need to transpose the weight later
                    model._modules[name].source_cls = type(module)
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(False)
        if len(list(module.children())) > 0:
            _, has_been_replaced = _replace_with_bnb_linear(
                module,
                modules_to_not_convert,
                current_key_name,
                quantization_config,
                has_been_replaced=has_been_replaced,
            )
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None):
    modules_to_not_convert = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert
    model, has_been_replaced = _replace_with_bnb_linear(
        model, modules_to_not_convert, current_key_name, quantization_config
    )

    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )

    return model
def replace_8bit_linear(*args, **kwargs):
    warnings.warn(
        "`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead",
        FutureWarning,
    )
    return replace_with_bnb_linear(*args, **kwargs)
def set_module_8bit_tensor_to_device(*args, **kwargs):
    warnings.warn(
        "`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead",
        FutureWarning,
    )
    return set_module_quantized_tensor_to_device(*args, **kwargs)
def get_keys_to_not_convert(model):
    tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager
    tied_model.tie_weights()

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names
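
# A standalone sketch of the dotted-name traversal used above to reach a nested
# parameter (illustrative two-layer model; guarded like the imports above):
if is_bitsandbytes_available():

    def _get_submodule_sketch(module, tensor_name):
        *path, leaf = tensor_name.split(".")
        for part in path:
            module = getattr(module, part)
        return module, leaf

    _owner, _leaf = _get_submodule_sketch(nn.Sequential(nn.Linear(2, 2)), "0.weight")
    assert _leaf == "weight" and hasattr(_owner, "weight")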
| 629
|
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class IFPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFPipeline
    params = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@slow
@require_torch_gpu
class IFPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_all(self):
        # if
UpperCamelCase : Union[str, Any] = IFPipeline.from_pretrained('DeepFloyd/IF-I-XL-v1.0', variant='fp16', torch_dtype=torch.floataa )
UpperCamelCase : str = IFSuperResolutionPipeline.from_pretrained(
'DeepFloyd/IF-II-L-v1.0', variant='fp16', torch_dtype=torch.floataa, text_encoder=SCREAMING_SNAKE_CASE_, tokenizer=SCREAMING_SNAKE_CASE_ )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to('cuda' )
UpperCamelCase , UpperCamelCase : List[str] = pipe_a.encode_prompt('anime turtle', device='cuda' )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
UpperCamelCase : int = None
UpperCamelCase : Union[str, Any] = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
UpperCamelCase : Optional[int] = IFImgaImgPipeline(**pipe_a.components )
UpperCamelCase : List[Any] = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
UpperCamelCase : Union[str, Any] = IFInpaintingPipeline(**pipe_a.components )
UpperCamelCase : Union[str, Any] = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Any:
# pipeline 1
_start_torch_memory_measurement()
UpperCamelCase : str = torch.Generator(device='cpu' ).manual_seed(0 )
UpperCamelCase : str = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE_, negative_prompt_embeds=SCREAMING_SNAKE_CASE_, num_inference_steps=2, generator=SCREAMING_SNAKE_CASE_, output_type='np', )
UpperCamelCase : Union[str, Any] = output.images[0]
assert image.shape == (64, 64, 3)
UpperCamelCase : Any = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
UpperCamelCase : Any = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy' )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
# pipeline 2
_start_torch_memory_measurement()
UpperCamelCase : Union[str, Any] = torch.Generator(device='cpu' ).manual_seed(0 )
UpperCamelCase : Tuple = floats_tensor((1, 3, 64, 64), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE_, negative_prompt_embeds=SCREAMING_SNAKE_CASE_, image=SCREAMING_SNAKE_CASE_, generator=SCREAMING_SNAKE_CASE_, num_inference_steps=2, output_type='np', )
UpperCamelCase : Tuple = output.images[0]
assert image.shape == (256, 256, 3)
UpperCamelCase : Tuple = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
UpperCamelCase : int = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy' )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> List[Any]:
# pipeline 1
_start_torch_memory_measurement()
UpperCamelCase : str = floats_tensor((1, 3, 64, 64), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : str = torch.Generator(device='cpu' ).manual_seed(0 )
UpperCamelCase : Any = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE_, negative_prompt_embeds=SCREAMING_SNAKE_CASE_, image=SCREAMING_SNAKE_CASE_, num_inference_steps=2, generator=SCREAMING_SNAKE_CASE_, output_type='np', )
UpperCamelCase : Optional[int] = output.images[0]
assert image.shape == (64, 64, 3)
UpperCamelCase : Any = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
UpperCamelCase : Tuple = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy' )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
# pipeline 2
_start_torch_memory_measurement()
UpperCamelCase : int = torch.Generator(device='cpu' ).manual_seed(0 )
UpperCamelCase : str = floats_tensor((1, 3, 256, 256), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = floats_tensor((1, 3, 64, 64), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Dict = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE_, negative_prompt_embeds=SCREAMING_SNAKE_CASE_, image=SCREAMING_SNAKE_CASE_, original_image=SCREAMING_SNAKE_CASE_, generator=SCREAMING_SNAKE_CASE_, num_inference_steps=2, output_type='np', )
UpperCamelCase : Any = output.images[0]
assert image.shape == (256, 256, 3)
UpperCamelCase : str = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
UpperCamelCase : int = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy' )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
    def _test_if_inpainting(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 64, 64), rng=random.Random(1)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            mask_image=mask_image,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 256, 256), rng=random.Random(1)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            mask_image=mask_image,
            original_image=original_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)
def _start_torch_memory_measurement():
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
| 40
| 0
|
from collections import Counter
from timeit import timeit
def can_string_be_rearranged_as_palindrome_counter(input_str: str = "") -> bool:
    """A string can be rearranged into a palindrome iff at most one character
    occurs an odd number of times (spaces and case are ignored)."""
    return sum(c % 2 for c in Counter(input_str.replace(" ", "").lower()).values()) < 2


def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool:
    """Same check as above, implemented with an explicit frequency dictionary."""
    if len(input_str) == 0:
        return True
    lower_case_input_str = input_str.replace(" ", "").lower()
    # character_freq_dict: stores the frequency of every character in the input string
    character_freq_dict: dict = {}
    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character, 0) + 1
    odd_char = 0
    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
        if odd_char > 1:
            return False
    return True
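# Quick illustrative check of both predicates (hand-verified): "aab" can be
# rearranged into the palindrome "aba", while "abc" has three characters with
# odd counts and therefore cannot.
# >>> can_string_be_rearranged_as_palindrome_counter("aab")
# True
# >>> can_string_be_rearranged_as_palindrome("abc")
# False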
def benchmark(input_str: str = "") -> None:
    """Print the answer and the timeit timing of both implementations.

    Note: the timed statements always run against the module-level `check_str`,
    matching the original benchmark setup."""
    print("\nFor string = ", input_str, ":")
    print(
        "> can_string_be_rearranged_as_palindrome_counter()",
        "\tans =",
        can_string_be_rearranged_as_palindrome_counter(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome_counter(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )
    print(
        "> can_string_be_rearranged_as_palindrome()",
        "\tans =",
        can_string_be_rearranged_as_palindrome(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )


if __name__ == "__main__":
    check_str = input(
        "Enter string to determine if it can be rearranged as a palindrome or not: "
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(f"{check_str} can {'' if status else 'not '}be rearranged as a palindrome")
| 232
|
def solution(length: int = 50) -> int:
    """Count the ways a row of `length` units can be filled with black blocks
    at least three units long, any two blocks separated by at least one grey
    unit (Project Euler problem 114)."""
    ways_number = [1] * (length + 1)

    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]

            ways_number[row_length] += 1

    return ways_number[length]
if __name__ == "__main__":
print(F'''{solution() = }''')
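# Sanity check against the problem statement this recurrence follows: a row
# measuring seven units in length admits exactly seventeen arrangements, so
# solution(7) == 17.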
| 232
| 1
|
import warnings

from diffusers import StableDiffusionImg2ImgPipeline  # noqa F401
warnings.warn(
'The `image_to_image.py` script is outdated. Please use directly `from diffusers import'
' StableDiffusionImg2ImgPipeline` instead.'
)
| 334
|
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()


def convert_weight_and_push(hidden_sizes: int, name: str, config, save_directory: Path, push_to_hub: bool = True):
    print(f"Converting {name}...")

    with torch.no_grad():
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model("levit_128s", pretrained=True)
            else:
                from_model = timm.create_model("levit_128", pretrained=True)
        if hidden_sizes == 192:
            from_model = timm.create_model("levit_192", pretrained=True)
        if hidden_sizes == 256:
            from_model = timm.create_model("levit_256", pretrained=True)
        if hidden_sizes == 384:
            from_model = timm.create_model("levit_384", pretrained=True)

        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)

        x = torch.randn((2, 3, 224, 224))
        out1 = from_model(x)
        out2 = our_model(x).logits

    assert torch.allclose(out1, out2), "The model logits don't match the original one."

    checkpoint_name = name
    print(checkpoint_name)

    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(f"Pushed {checkpoint_name}")


def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_hidden_sizes = {
        "levit-128S": 128,
        "levit-128": 128,
        "levit-192": 192,
        "levit-256": 256,
        "levit-384": 384,
    }

    names_to_config = {
        "levit-128S": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 6, 8], depths=[2, 3, 4], key_dim=[16, 16, 16], drop_path_rate=0,
        ),
        "levit-128": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0,
        ),
        "levit-192": ImageNetPreTrainedConfig(
            hidden_sizes=[192, 288, 384], num_attention_heads=[3, 5, 6], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0,
        ),
        "levit-256": ImageNetPreTrainedConfig(
            hidden_sizes=[256, 384, 512], num_attention_heads=[4, 6, 8], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0,
        ),
        "levit-384": ImageNetPreTrainedConfig(
            hidden_sizes=[384, 512, 768], num_attention_heads=[6, 9, 12], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0.1,
        ),
    }

    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], model_name, names_to_config[model_name], save_directory, push_to_hub
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    return config, expected_shape


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help="The name of the model you wish to convert, it must be one of the supported Levit* architecture,",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="levit-dump-folder/",
        type=Path,
        required=False,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
    parser.add_argument(
        "--no-push_to_hub",
        dest="push_to_hub",
        action="store_false",
        help="Do not push model and image processor to the hub",
    )

    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 482
| 0
|
"""simple docstring"""
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
logger = logging.get_logger(__name__)


if is_torch_available():
    import torch
    from torch import nn
    from torch.utils.data import Dataset

    from transformers import Trainer

    class DummyDataset(Dataset):
        def __init__(self, length: int = 101):
            self.length = length

        def __len__(self):
            return self.length

        def __getitem__(self, i):
            return i

    class DummyDataCollator:
        def __call__(self, features):
            return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)}

    class DummyModel(nn.Module):
        def __init__(self):
            super().__init__()
            # Add some (unused) params otherwise DDP will complain.
            self.fc = nn.Linear(120, 80)

        def forward(self, input_ids, labels=None):
            if labels is not None:
                return torch.tensor(0.0, device=input_ids.device), input_ids
            else:
                return input_ids


class TestTrainerDistributedNeuronCore(TestCasePlus):
    @require_torch_neuroncore
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node=2
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call


class TestTrainerDistributed(TestCasePlus):
    @require_torch_multi_gpu
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node={torch.cuda.device_count()}
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call


if __name__ == "__main__":
    # The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
    #
    # PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py

    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]

    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"
    )

    # Essentially, what we want to verify in the distributed case is that we get all samples back,
    # in the right order. (this is crucial for prediction for instance)
    for dataset_length in [101, 40, 7]:
        dataset = DummyDataset(dataset_length)

        def compute_metrics(p: EvalPrediction) -> Dict:
            sequential = list(range(len(dataset)))
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    "Predictions and/or labels do not match expected results:\n  - predictions: "
                    f"{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}"
                )
            return {"success": success}

        trainer = Trainer(
            model=DummyModel(),
            args=training_args,
            data_collator=DummyDataCollator(),
            eval_dataset=dataset,
            compute_metrics=compute_metrics,
        )
        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = 2

        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = None
| 544
|
"""simple docstring"""
lowerCAmelCase__ = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
lowerCAmelCase__ = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
lowerCAmelCase__ = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 544
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "naver-clova-ix/donut-base": "https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json",
    # See all Donut models at https://huggingface.co/models?filter=donut-swin
}


class DonutSwinConfig(PretrainedConfig):
    model_type = "donut-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
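# Minimal usage sketch; the values follow directly from the constructor above
# (num_layers = len(depths), hidden_size = embed_dim * 2 ** (len(depths) - 1)):
# >>> config = DonutSwinConfig()
# >>> config.num_layers
# 4
# >>> config.hidden_size
# 768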
| 344
|
"""InstructBLIP model configuration."""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Salesforce/instruct-blip-flan-t5": "https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json",
}


class InstructBlipVisionConfig(PretrainedConfig):
    model_type = "instructblip_vision_model"

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=1e-6,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class InstructBlipQFormerConfig(PretrainedConfig):
    model_type = "instructblip_qformer"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["qformer_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class InstructBlipConfig(PretrainedConfig):
    model_type = "instructblip"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values.")

        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.")

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")

        self.vision_config = InstructBlipVisionConfig(**vision_config)
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder

        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(
        cls,
        vision_config: InstructBlipVisionConfig,
        qformer_config: InstructBlipQFormerConfig,
        text_config: PretrainedConfig,
        **kwargs,
    ):
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
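# Hedged usage sketch: the composite config can be assembled from the three
# sub-configs; `from_vision_qformer_text_configs` round-trips them through
# dicts, and the constructor then wires the Q-Former to the vision tower:
# >>> vision = InstructBlipVisionConfig()
# >>> qformer = InstructBlipQFormerConfig()
# >>> text = CONFIG_MAPPING["opt"]()
# >>> config = InstructBlipConfig.from_vision_qformer_text_configs(vision, qformer, text)
# >>> config.qformer_config.encoder_hidden_size == config.vision_config.hidden_size
# True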
| 561
| 0
|
"""simple docstring"""
def lowercase (SCREAMING_SNAKE_CASE_ : int = 10**9 ) -> int:
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = 0
while perimeter <= max_perimeter:
perimeters_sum += perimeter
prev_value += 2 * value
value += prev_value
SCREAMING_SNAKE_CASE = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
i += 1
return perimeters_sum
if __name__ == "__main__":
print(f'''{solution() = }''')
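# Hand-traced example of the recurrence: the loop first yields perimeter 16
# (the (5, 5, 6) triangle) and then 50 (the (17, 17, 16) triangle), so
# solution(50) == 16 + 50 == 66.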
| 327
|
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    # Load the entity vocab file
    entity_vocab = load_entity_vocab(entity_vocab_path)

    tokenizer = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, LukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[tokenizer.convert_tokens_to_ids(["@"])[0]].unsqueeze(0)
    ent2_emb = word_emb[tokenizer.convert_tokens_to_ids(["#"])[0]].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_emb[entity_vocab["[MASK2]"]] = entity_emb[entity_vocab["[MASK]"]]

    model = LukeModel(config=config).eval()

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    if not (len(missing_keys) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(f"Missing keys {', '.join(missing_keys)}. Expected only missing embeddings.position_ids")
    if not (all(key.startswith("entity_predictions") or key.startswith("lm_head") for key in unexpected_keys)):
        raise ValueError(
            "Unexpected keys"
            f" {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions') or key.startswith('lm_head'))])}"
        )

    # Check outputs
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = (
        "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"
        " new world number one avoid a humiliating second- round exit at Wimbledon ."
    )
    span = (39, 42)
    encoding = tokenizer(text, entity_spans=[span], add_prefix_space=True, return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 42, 1024))
        expected_slice = torch.tensor(
            [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]]
        )
    else:  # base
        expected_shape = torch.Size((1, 42, 768))
        expected_slice = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 1, 1024))
        expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]])
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)


def load_entity_vocab(entity_vocab_path):
    entity_vocab = {}
    with open(entity_vocab_path, "r", encoding="utf-8") as f:
        for index, line in enumerate(f):
            title, _ = line.rstrip().split("\t")
            entity_vocab[title] = index
    return entity_vocab


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
    parser.add_argument(
        "--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
    )
    parser.add_argument(
        "--entity_vocab_path",
        default=None,
        type=str,
        help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
    )
    parser.add_argument(
        "--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
    )
    args = parser.parse_args()
    convert_luke_checkpoint(
        args.checkpoint_path,
        args.metadata_path,
        args.entity_vocab_path,
        args.pytorch_dump_folder_path,
        args.model_size,
    )
| 327
| 1
|
from bisect import bisect
from itertools import accumulate
def frac_knapsack(vl, wt, w, n):
    """Greedy fractional knapsack: sort items by value/weight ratio, take whole
    items while they fit, then a fraction of the first item that does not."""
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
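# Worked example on a classic instance: values [60, 100, 120], weights
# [10, 20, 30], capacity 50 -> take the first two items whole plus 20/30 of
# the third, for 160 + 80 = 240.
# >>> frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3)
# 240.0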
| 504
|
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class DanceDiffusionPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 100,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        audio_length_in_s: Optional[float] = None,
        return_dict: bool = True,
    ) -> Union[AudioPipelineOutput, Tuple]:
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate

        sample_size = audio_length_in_s * self.unet.config.sample_rate

        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
                f" {3 * down_scale_factor / self.unet.config.sample_rate}."
            )

        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                " process."
            )
        sample_size = int(sample_size)

        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample

            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        audio = audio.clamp(-1, 1).float().cpu().numpy()

        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)

        return AudioPipelineOutput(audios=audio)
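# Minimal usage sketch. The checkpoint name below is only an assumption for
# illustration (any repository exposing a compatible 1D UNet plus scheduler
# pair would work the same way):
#
#   pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
#   output = pipe(batch_size=1, num_inference_steps=50, audio_length_in_s=4.0)
#   audio = output.audios[0]  # numpy array of shape (channels, samples)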
| 504
| 1
|
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DDIMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "latents",
        "callback",
        "callback_steps",
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        scheduler = DDIMScheduler()
        components = {"unet": unet, "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 32, 32, 3))
        expected_slice = np.array(
            [1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class DDIMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler()

        ddim = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddim.to(torch_device)
        ddim.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddim(generator=generator, eta=0.0, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_ema_bedroom(self):
        model_id = "google/ddpm-ema-bedroom-256"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler.from_pretrained(model_id)

        ddpm = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddpm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 392
|
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class RealmRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.num_block_records = 5

        # Realm tok
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "test",
            "question",
            "this",
            "is",
            "the",
            "first",
            "second",
            "third",
            "fourth",
            "fifth",
            "record",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        realm_tokenizer_path = os.path.join(self.tmpdirname, "realm_tokenizer")
        os.makedirs(realm_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(realm_tokenizer_path, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        realm_block_records_path = os.path.join(self.tmpdirname, "realm_block_records")
        os.makedirs(realm_block_records_path, exist_ok=True)

    def get_tokenizer(self) -> RealmTokenizer:
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname, "realm_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def get_config(self):
        config = RealmConfig(num_block_records=self.num_block_records)
        return config

    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "question": ["foo", "bar"],
                "answers": [["Foo", "Bar"], ["Bar"]],
            }
        )
        return dataset

    def get_dummy_block_records(self):
        block_records = np.array(
            [
                b"This is the first record",
                b"This is the second record",
                b"This is the third record",
                b"This is the fourth record",
                b"This is the fifth record",
                b"This is a longer longer longer record",
            ],
            dtype=object,
        )
        return block_records

    def get_dummy_retriever(self):
        retriever = RealmRetriever(
            block_records=self.get_dummy_block_records(),
            tokenizer=self.get_tokenizer(),
        )
        return retriever

    def test_retrieve(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )
        self.assertEqual(len(has_answers), 2)
        self.assertEqual(len(start_pos), 2)
        self.assertEqual(len(end_pos), 2)
        self.assertEqual(concat_inputs.input_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.attention_mask.shape, (2, 10))
        self.assertEqual(concat_inputs.token_type_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.special_tokens_mask.shape, (2, 10))
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0]),
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"],
        )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1]),
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"],
        )

    def test_block_has_answer(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3, 5], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth", "longer longer"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, _ = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )

        self.assertEqual([False, True, True], has_answers)
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]], start_pos)
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]], end_pos)

    def test_save_load_pretrained(self):
        retriever = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))

        # Test local path
        retriever = retriever.from_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))
        self.assertEqual(retriever.block_records[0], b"This is the first record")

        # Test mocked remote path
        with patch("transformers.models.realm.retrieval_realm.hf_hub_download") as mock_hf_hub_download:
            mock_hf_hub_download.return_value = os.path.join(
                os.path.join(self.tmpdirname, "realm_block_records"), _REALM_BLOCK_RECORDS_FILENAME
            )
            retriever = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa")

        self.assertEqual(retriever.block_records[0], b"This is the first record")
| 392
| 1
|
from __future__ import annotations
def ceil_index(v, l, r, key):  # noqa: E741
    """Binary search for the smallest index in v[l..r] whose value is >= key."""
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r


def longest_increasing_subsequence_length(v: list[int]) -> int:
    if len(v) == 0:
        return 0

    tail = [0] * len(v)
    length = 1

    tail[0] = v[0]

    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # new smallest value: start a fresh length-1 candidate
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # v[i] extends the longest subsequence found so far
            tail[length] = v[i]
            length += 1
        else:
            # v[i] replaces the smallest tail element that is >= v[i]
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]

    return length
if __name__ == "__main__":
import doctest
doctest.testmod()
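# Usage example: the `tail` array stays sorted, so every element is placed by
# binary search and the whole scan runs in O(n log n).
# >>> longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6])
# 6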
| 39
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )

    return config


def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name

    return name


def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    # fmt: off
    model_name_to_url = {
        "focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
        "focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
        "focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
        "focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
        "focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
        "focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
        "focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
        "focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
        "focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
        "focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
    }
    # fmt: on

    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)

    # verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")

    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )

    original_pixel_values = image_transforms(image).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)

    outputs = model(**inputs)

    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])

    print("First values of logits:", outputs.logits[0, :3])

    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(f"{model_name}")
        processor.push_to_hub(f"{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="focalnet-tiny",
        type=str,
        help="Name of the FocalNet model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub.",
    )

    args = parser.parse_args()
    convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 89
| 0
|
"""Boolean-algebra NAND gate: the output is 0 only when both inputs are 1."""


def nand_gate(input_1: int, input_2: int) -> int:
    """Calculate NAND of the two input values."""
    return int((input_1, input_2).count(0) != 0)


def test_nand_gate() -> None:
    """Test nand_gate against the full truth table."""
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
| 532
|
class CircularQueue:
    """Circular FIFO queue backed by a fixed-size list."""

    def __init__(self, n):
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception("UNDERFLOW")
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
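# Usage sketch: the front/rear indices wrap modulo n, so a small buffer can be
# reused indefinitely without shifting elements.
# >>> q = CircularQueue(2)
# >>> q.enqueue(1).enqueue(2).dequeue()
# 1
# >>> q.enqueue(3).dequeue()
# 2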
| 532
| 1
|
"""simple docstring"""
def _snake_case ( lowercase__ = 4000000 ):
_lowerCamelCase : Dict = [0, 1]
_lowerCamelCase : str = 0
while fib[i] <= n:
fib.append(fib[i] + fib[i + 1] )
if fib[i + 2] > n:
break
i += 1
_lowerCamelCase : List[str] = 0
for j in range(len(lowercase__ ) - 1 ):
if fib[j] % 2 == 0:
total += fib[j]
return total
if __name__ == "__main__":
print(F"{solution() = }")
| 630
|
"""simple docstring"""
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class lowerCAmelCase__ ( lowercase, unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = MobileBertTokenizer
lowerCamelCase__ = MobileBertTokenizerFast
lowerCamelCase__ = True
lowerCamelCase__ = True
lowerCamelCase__ = filter_non_english
lowerCamelCase__ = """google/mobilebert-uncased"""
def A_ ( self ):
super().setUp()
_lowerCamelCase : Optional[int] = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
_lowerCamelCase : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
_lowerCamelCase : Any = [
(tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped
for tokenizer_def in self.tokenizers_list
]
def A_ ( self , lowercase ):
_lowerCamelCase : Union[str, Any] = 'UNwant\u00E9d,running'
_lowerCamelCase : List[Any] = 'unwanted, running'
return input_text, output_text
def A_ ( self ):
_lowerCamelCase : Dict = self.tokenizer_class(self.vocab_file )
_lowerCamelCase : Union[str, Any] = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(lowercase , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase ) , [9, 6, 7, 12, 10, 11] )
def A_ ( self ):
if not self.test_rust_tokenizer:
return
_lowerCamelCase : Any = self.get_tokenizer()
_lowerCamelCase : Any = self.get_rust_tokenizer()
_lowerCamelCase : int = 'UNwant\u00E9d,running'
_lowerCamelCase : Union[str, Any] = tokenizer.tokenize(lowercase )
_lowerCamelCase : List[Any] = rust_tokenizer.tokenize(lowercase )
self.assertListEqual(lowercase , lowercase )
_lowerCamelCase : Dict = tokenizer.encode(lowercase , add_special_tokens=lowercase )
_lowerCamelCase : Dict = rust_tokenizer.encode(lowercase , add_special_tokens=lowercase )
self.assertListEqual(lowercase , lowercase )
_lowerCamelCase : Optional[Any] = self.get_rust_tokenizer()
_lowerCamelCase : Optional[int] = tokenizer.encode(lowercase )
_lowerCamelCase : List[str] = rust_tokenizer.encode(lowercase )
self.assertListEqual(lowercase , lowercase )
# With lower casing
_lowerCamelCase : List[Any] = self.get_tokenizer(do_lower_case=lowercase )
_lowerCamelCase : int = self.get_rust_tokenizer(do_lower_case=lowercase )
_lowerCamelCase : Optional[Any] = 'UNwant\u00E9d,running'
_lowerCamelCase : Dict = tokenizer.tokenize(lowercase )
_lowerCamelCase : int = rust_tokenizer.tokenize(lowercase )
self.assertListEqual(lowercase , lowercase )
_lowerCamelCase : Dict = tokenizer.encode(lowercase , add_special_tokens=lowercase )
_lowerCamelCase : Dict = rust_tokenizer.encode(lowercase , add_special_tokens=lowercase )
self.assertListEqual(lowercase , lowercase )
_lowerCamelCase : Any = self.get_rust_tokenizer()
_lowerCamelCase : Union[str, Any] = tokenizer.encode(lowercase )
_lowerCamelCase : Dict = rust_tokenizer.encode(lowercase )
self.assertListEqual(lowercase , lowercase )
def A_ ( self ):
_lowerCamelCase : Dict = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def A_ ( self ):
_lowerCamelCase : Optional[Any] = BasicTokenizer(do_lower_case=True )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def A_ ( self ):
_lowerCamelCase : List[str] = BasicTokenizer(do_lower_case=True , strip_accents=False )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def A_ ( self ):
_lowerCamelCase : List[Any] = BasicTokenizer(do_lower_case=True , strip_accents=True )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def A_ ( self ):
_lowerCamelCase : int = BasicTokenizer(do_lower_case=True )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def A_ ( self ):
_lowerCamelCase : List[str] = BasicTokenizer(do_lower_case=False )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def A_ ( self ):
_lowerCamelCase : Union[str, Any] = BasicTokenizer(do_lower_case=False , strip_accents=False )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def A_ ( self ):
_lowerCamelCase : Union[str, Any] = BasicTokenizer(do_lower_case=False , strip_accents=True )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def A_ ( self ):
_lowerCamelCase : int = BasicTokenizer(do_lower_case=False , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def A_ ( self ):
_lowerCamelCase : Union[str, Any] = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
_lowerCamelCase : Tuple = {}
for i, token in enumerate(lowercase ):
_lowerCamelCase : Union[str, Any] = i
_lowerCamelCase : str = WordpieceTokenizer(vocab=lowercase , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
def A_ ( self ):
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def A_ ( self ):
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def A_ ( self ):
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
def A_ ( self ):
_lowerCamelCase : List[Any] = self.get_tokenizer()
_lowerCamelCase : Union[str, Any] = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(t ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
self.assertListEqual(
[rust_tokenizer.tokenize(t ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
@slow
def A_ ( self ):
_lowerCamelCase : Optional[Any] = self.tokenizer_class.from_pretrained('google/mobilebert-uncased' )
_lowerCamelCase : List[str] = tokenizer.encode('sequence builders' , add_special_tokens=False )
_lowerCamelCase : Dict = tokenizer.encode('multi-sequence build' , add_special_tokens=False )
_lowerCamelCase : Optional[int] = tokenizer.build_inputs_with_special_tokens(lowercase )
_lowerCamelCase : List[Any] = tokenizer.build_inputs_with_special_tokens(lowercase , lowercase )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def A_ ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_lowerCamelCase : int = self.rust_tokenizer_class.from_pretrained(lowercase , **lowercase )
_lowerCamelCase : int = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
_lowerCamelCase : int = tokenizer_r.encode_plus(
lowercase , return_attention_mask=lowercase , return_token_type_ids=lowercase , return_offsets_mapping=lowercase , add_special_tokens=lowercase , )
_lowerCamelCase : Tuple = tokenizer_r.do_lower_case if hasattr(lowercase , 'do_lower_case' ) else False
_lowerCamelCase : Dict = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'Allen'),
((21, 23), '##NL'),
((23, 24), '##P'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'allen'),
((21, 23), '##nl'),
((23, 24), '##p'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] )
def A_ ( self ):
_lowerCamelCase : Tuple = ['的', '人', '有']
_lowerCamelCase : Optional[Any] = ''.join(lowercase )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_lowerCamelCase : int = True
_lowerCamelCase : Any = self.tokenizer_class.from_pretrained(lowercase , **lowercase )
_lowerCamelCase : Dict = self.rust_tokenizer_class.from_pretrained(lowercase , **lowercase )
_lowerCamelCase : List[str] = tokenizer_p.encode(lowercase , add_special_tokens=lowercase )
_lowerCamelCase : List[Any] = tokenizer_r.encode(lowercase , add_special_tokens=lowercase )
_lowerCamelCase : Any = tokenizer_r.convert_ids_to_tokens(lowercase )
_lowerCamelCase : Dict = tokenizer_p.convert_ids_to_tokens(lowercase )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(lowercase , lowercase )
self.assertListEqual(lowercase , lowercase )
_lowerCamelCase : Optional[Any] = False
_lowerCamelCase : Dict = self.rust_tokenizer_class.from_pretrained(lowercase , **lowercase )
_lowerCamelCase : Optional[int] = self.tokenizer_class.from_pretrained(lowercase , **lowercase )
_lowerCamelCase : Optional[Any] = tokenizer_r.encode(lowercase , add_special_tokens=lowercase )
_lowerCamelCase : str = tokenizer_p.encode(lowercase , add_special_tokens=lowercase )
_lowerCamelCase : List[Any] = tokenizer_r.convert_ids_to_tokens(lowercase )
_lowerCamelCase : Dict = tokenizer_p.convert_ids_to_tokens(lowercase )
# it is expected that only the first Chinese character is not preceded by "##".
_lowerCamelCase : List[str] = [
F'''##{token}''' if idx != 0 else token for idx, token in enumerate(lowercase )
]
self.assertListEqual(lowercase , lowercase )
self.assertListEqual(lowercase , lowercase )
| 630
| 1
|
"""simple docstring"""
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class __A ( SCREAMING_SNAKE_CASE_ ):
UpperCAmelCase__ = ["vqvae"]
def __init__( self : int , __snake_case : AutoencoderKL , __snake_case : UNetaDConditionModel , __snake_case : Mel , __snake_case : Union[DDIMScheduler, DDPMScheduler] , ) -> Union[str, Any]:
super().__init__()
self.register_modules(unet=__snake_case , scheduler=__snake_case , mel=__snake_case , vqvae=__snake_case )
def lowerCamelCase__ ( self : Tuple ) -> int:
return 5_0 if isinstance(self.scheduler , DDIMScheduler ) else 1_0_0_0
@torch.no_grad()
def __call__( self : Optional[Any] , __snake_case : int = 1 , __snake_case : str = None , __snake_case : np.ndarray = None , __snake_case : int = 0 , __snake_case : int = 0 , __snake_case : int = None , __snake_case : torch.Generator = None , __snake_case : float = 0 , __snake_case : float = 0 , __snake_case : torch.Generator = None , __snake_case : float = 0 , __snake_case : torch.Tensor = None , __snake_case : torch.Tensor = None , __snake_case : str=True , ) -> Union[
Union[AudioPipelineOutput, ImagePipelineOutput],
Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
]:
__magic_name__: Dict = steps or self.get_default_steps()
self.scheduler.set_timesteps(__snake_case )
__magic_name__: Union[str, Any] = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
__magic_name__: int = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
__magic_name__: Dict = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=__snake_case , device=self.device , )
__magic_name__: List[str] = noise
__magic_name__: Union[str, Any] = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(__snake_case , __snake_case )
__magic_name__: List[str] = self.mel.audio_slice_to_image(__snake_case )
__magic_name__: Union[str, Any] = np.frombuffer(input_image.tobytes() , dtype="""uint8""" ).reshape(
(input_image.height, input_image.width) )
__magic_name__: Dict = (input_image / 2_5_5) * 2 - 1
__magic_name__: List[Any] = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
__magic_name__: List[str] = self.vqvae.encode(torch.unsqueeze(__snake_case , 0 ) ).latent_dist.sample(
generator=__snake_case )[0]
__magic_name__: Tuple = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
__magic_name__: List[str] = self.scheduler.add_noise(__snake_case , __snake_case , self.scheduler.timesteps[start_step - 1] )
__magic_name__: Any = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
__magic_name__: Optional[int] = int(mask_start_secs * pixels_per_second )
__magic_name__: int = int(mask_end_secs * pixels_per_second )
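# Worked example (added note, assuming the default Mel settings of
# sample_rate=22050, x_res=256 and hop_length=512): pixels_per_second is
# 256 * 22050 / 256 / 512 ~= 43, so mask_start_secs=1.0 masks about 43 columns.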
__magic_name__: Tuple = self.scheduler.add_noise(__snake_case , __snake_case , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet , UNetaDConditionModel ):
__magic_name__: Optional[int] = self.unet(__snake_case , __snake_case , __snake_case )["""sample"""]
else:
__magic_name__: List[str] = self.unet(__snake_case , __snake_case )["""sample"""]
if isinstance(self.scheduler , DDIMScheduler ):
__magic_name__: Dict = self.scheduler.step(
model_output=__snake_case , timestep=__snake_case , sample=__snake_case , eta=__snake_case , generator=__snake_case , )["""prev_sample"""]
else:
__magic_name__: Tuple = self.scheduler.step(
model_output=__snake_case , timestep=__snake_case , sample=__snake_case , generator=__snake_case , )["""prev_sample"""]
if mask is not None:
if mask_start > 0:
__magic_name__: List[str] = mask[:, step, :, :mask_start]
if mask_end > 0:
__magic_name__: List[str] = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
__magic_name__: Union[str, Any] = 1 / self.vqvae.config.scaling_factor * images
__magic_name__: Union[str, Any] = self.vqvae.decode(__snake_case )["""sample"""]
__magic_name__: Tuple = (images / 2 + 0.5).clamp(0 , 1 )
__magic_name__: List[str] = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
__magic_name__: Tuple = (images * 2_5_5).round().astype("""uint8""" )
__magic_name__: Union[str, Any] = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(_ , mode="""RGB""" ).convert("""L""" ) for _ in images) )
__magic_name__: str = [self.mel.image_to_audio(__snake_case ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(__snake_case )[:, np.newaxis, :] ) , **ImagePipelineOutput(__snake_case ) )
@torch.no_grad()
def lowerCamelCase__ ( self : List[Any] , __snake_case : List[Image.Image] , __snake_case : int = 5_0 ) -> np.ndarray:
assert isinstance(self.scheduler , DDIMScheduler )
self.scheduler.set_timesteps(__snake_case )
__magic_name__: Optional[int] = np.array(
[np.frombuffer(image.tobytes() , dtype="""uint8""" ).reshape((1, image.height, image.width) ) for image in images] )
__magic_name__: List[Any] = (sample / 2_5_5) * 2 - 1
__magic_name__: List[Any] = torch.Tensor(__snake_case ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
__magic_name__: Optional[Any] = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
__magic_name__: Union[str, Any] = self.scheduler.alphas_cumprod[t]
__magic_name__: str = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
__magic_name__: List[str] = 1 - alpha_prod_t
__magic_name__: int = self.unet(__snake_case , __snake_case )["""sample"""]
__magic_name__: Tuple = (1 - alpha_prod_t_prev) ** 0.5 * model_output
__magic_name__: Tuple = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
__magic_name__: str = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def lowerCamelCase__ ( xa : torch.Tensor , xb : torch.Tensor , alpha : float ) -> torch.Tensor:
__magic_name__: Optional[Any] = acos(torch.dot(torch.flatten(xa ) , torch.flatten(xb ) ) / torch.norm(xa ) / torch.norm(xb ) )
return sin((1 - alpha) * theta ) * xa / sin(theta ) + sin(alpha * theta ) * xb / sin(theta )
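# Note on the method above (added summary): it is a spherical linear
# interpolation, returning xa when alpha == 0 and xb when alpha == 1, and
# moving along the great circle between the flattened tensors in between.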
| 700
|
"""simple docstring"""
from math import factorial
DIGIT_FACTORIAL = {str(digit): factorial(digit) for digit in range(10)}
def digit_factorial_sum ( number : int ) -> int:
if not isinstance(number , int ):
raise TypeError("""Parameter number must be int""" )
if number < 0:
raise ValueError("""Parameter number must be greater than or equal to 0""" )
# Convert the number to a string to iterate over its digits, summing each digit's factorial.
return sum(DIGIT_FACTORIAL[digit] for digit in str(number ) )
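# Worked example (added check): digit_factorial_sum(145) == 1! + 4! + 5!
# == 1 + 24 + 120 == 145, i.e. 145 is a fixed point of the digit-factorial map.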
def solution ( chain_length : int = 6_0 , number_limit : int = 1_0_0_0_0_0_0 ) -> int:
if not isinstance(chain_length , int ) or not isinstance(number_limit , int ):
raise TypeError("""Parameters chain_length and number_limit must be int""" )
if chain_length <= 0 or number_limit <= 0:
raise ValueError(
"""Parameters chain_length and number_limit must be greater than 0""" )
# the counter for the chains with the exact desired length
chains_counter = 0
# the cached sizes of the previous chains
chain_sets_lengths: dict[int, int] = {}
for start_chain_element in range(1 , number_limit ):
# The temporary set will contain the elements of the chain
chain_set = set()
chain_set_length = 0
# Stop computing the chain when you find a cached size, a repeating item or the
# length is greater than the desired one.
chain_element = start_chain_element
while (
chain_element not in chain_sets_lengths
and chain_element not in chain_set
and chain_set_length <= chain_length
):
chain_set.add(chain_element )
chain_set_length += 1
chain_element = digit_factorial_sum(chain_element )
if chain_element in chain_sets_lengths:
chain_set_length += chain_sets_lengths[chain_element]
chain_sets_lengths[start_chain_element] = chain_set_length
# If chain contains the exact amount of elements increase the counter
if chain_set_length == chain_length:
chains_counter += 1
return chains_counter
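# Worked example (added note, from the Project Euler 74 statement): starting at
# 69 the chain is 69 -> 363600 -> 1454 -> 169 -> 363601 (-> 1454, which
# repeats), i.e. five non-repeating terms, so 69 counts when chain_length=5.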
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'''{solution()}''')
| 213
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
"""s-JoL/Open-Llama-V1""": """https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json""",
}
class _a ( __a ):
"""simple docstring"""
A_ = '''open-llama'''
def __init__( self : Any , lowercase_ : Optional[int]=100_000 , lowercase_ : Dict=4_096 , lowercase_ : List[str]=11_008 , lowercase_ : Union[str, Any]=32 , lowercase_ : Union[str, Any]=32 , lowercase_ : Any="silu" , lowercase_ : List[Any]=2_048 , lowercase_ : str=0.0_2 , lowercase_ : Optional[Any]=1e-6 , lowercase_ : int=True , lowercase_ : Dict=0 , lowercase_ : List[Any]=1 , lowercase_ : Optional[int]=2 , lowercase_ : Tuple=False , lowercase_ : Optional[int]=True , lowercase_ : str=0.1 , lowercase_ : Optional[Any]=0.1 , lowercase_ : List[Any]=True , lowercase_ : List[str]=True , lowercase_ : str=None , **lowercase_ : int , ):
'''simple docstring'''
lowercase_ = vocab_size
lowercase_ = max_position_embeddings
lowercase_ = hidden_size
lowercase_ = intermediate_size
lowercase_ = num_hidden_layers
lowercase_ = num_attention_heads
lowercase_ = hidden_act
lowercase_ = initializer_range
lowercase_ = rms_norm_eps
lowercase_ = use_cache
lowercase_ = kwargs.pop(
"""use_memorry_efficient_attention""" , lowercase_ )
lowercase_ = hidden_dropout_prob
lowercase_ = attention_dropout_prob
lowercase_ = use_stable_embedding
lowercase_ = shared_input_output_embedding
lowercase_ = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , tie_word_embeddings=lowercase_ , **lowercase_ , )
def lowerCamelCase__ ( self : Any ):
'''simple docstring'''
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , dict ) or len(self.rope_scaling ) != 2:
raise ValueError(
"""`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, """
F"""got {self.rope_scaling}""" )
lowercase_ = self.rope_scaling.get("""type""" , lowercase_ )
lowercase_ = self.rope_scaling.get("""factor""" , lowercase_ )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
F"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
if rope_scaling_factor is None or not isinstance(lowercase_ , lowercase_ ) or rope_scaling_factor <= 1.0:
raise ValueError(F"""`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}""" )
| 451
|
'''simple docstring'''
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
__snake_case = """src/transformers"""
__snake_case = """docs/source/en"""
__snake_case = """."""
def A_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ->Tuple:
with open(SCREAMING_SNAKE_CASE_ , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
lowercase_ = f.readlines()
# Find the start prompt.
lowercase_ = 0
while not lines[start_index].startswith(SCREAMING_SNAKE_CASE_ ):
start_index += 1
start_index += 1
lowercase_ = start_index
while not lines[end_index].startswith(SCREAMING_SNAKE_CASE_ ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
__snake_case = """Model|Encoder|Decoder|ForConditionalGeneration"""
# Regexes that match TF/Flax/PT model names.
__snake_case = re.compile(r"""TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
__snake_case = re.compile(r"""Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
__snake_case = re.compile(r"""(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
# This is to make sure the transformers module imported is the one in the repo.
__snake_case = direct_transformers_import(TRANSFORMERS_PATH)
def A_ ( SCREAMING_SNAKE_CASE_ ) ->Dict:
lowercase_ = re.finditer(""".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)""" , SCREAMING_SNAKE_CASE_ )
return [m.group(0 ) for m in matches]
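# Example (added for clarity): camel_case_split("TFBertModel") returns
# ["TF", "Bert", "Model"]: the regex breaks before an uppercase letter that
# follows a lowercase one, or before an uppercase+lowercase pair that ends an
# uppercase run.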
def A_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ->Optional[Any]:
lowercase_ = 2 if text == """✅""" or text == """❌""" else len(SCREAMING_SNAKE_CASE_ )
lowercase_ = (width - text_length) // 2
lowercase_ = width - text_length - left_indent
return " " * left_indent + text + " " * right_indent
def A_ ( ) ->Tuple:
lowercase_ = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
lowercase_ = {
name: config_maping_names[code]
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if code in config_maping_names
}
lowercase_ = {name: config.replace("""Config""" , """""" ) for name, config in model_name_to_config.items()}
# Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
lowercase_ = collections.defaultdict(SCREAMING_SNAKE_CASE_ )
lowercase_ = collections.defaultdict(SCREAMING_SNAKE_CASE_ )
lowercase_ = collections.defaultdict(SCREAMING_SNAKE_CASE_ )
lowercase_ = collections.defaultdict(SCREAMING_SNAKE_CASE_ )
lowercase_ = collections.defaultdict(SCREAMING_SNAKE_CASE_ )
# Let's lookup through all transformers object (once).
for attr_name in dir(SCREAMING_SNAKE_CASE_ ):
lowercase_ = None
if attr_name.endswith("""Tokenizer""" ):
lowercase_ = slow_tokenizers
lowercase_ = attr_name[:-9]
elif attr_name.endswith("""TokenizerFast""" ):
lowercase_ = fast_tokenizers
lowercase_ = attr_name[:-13]
elif _re_tf_models.match(SCREAMING_SNAKE_CASE_ ) is not None:
lowercase_ = tf_models
lowercase_ = _re_tf_models.match(SCREAMING_SNAKE_CASE_ ).groups()[0]
elif _re_flax_models.match(SCREAMING_SNAKE_CASE_ ) is not None:
lowercase_ = flax_models
lowercase_ = _re_flax_models.match(SCREAMING_SNAKE_CASE_ ).groups()[0]
elif _re_pt_models.match(SCREAMING_SNAKE_CASE_ ) is not None:
lowercase_ = pt_models
lowercase_ = _re_pt_models.match(SCREAMING_SNAKE_CASE_ ).groups()[0]
if lookup_dict is not None:
while len(SCREAMING_SNAKE_CASE_ ) > 0:
if attr_name in model_name_to_prefix.values():
lowercase_ = True
break
# Try again after removing the last word in the name
lowercase_ = """""".join(camel_case_split(SCREAMING_SNAKE_CASE_ )[:-1] )
# Let's build that table!
lowercase_ = list(model_name_to_config.keys() )
model_names.sort(key=str.lower )
lowercase_ = ["""Model""", """Tokenizer slow""", """Tokenizer fast""", """PyTorch support""", """TensorFlow support""", """Flax Support"""]
# We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
lowercase_ = [len(SCREAMING_SNAKE_CASE_ ) + 2 for c in columns]
lowercase_ = max([len(SCREAMING_SNAKE_CASE_ ) for name in model_names] ) + 2
# Build the table per se
lowercase_ = """|""" + """|""".join([_center_text(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for c, w in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )] ) + """|\n"""
# Use ":-----:" format to center-aligned table cell texts
table += "|" + "|".join([""":""" + """-""" * (w - 2) + """:""" for w in widths] ) + "|\n"
lowercase_ = {True: """✅""", False: """❌"""}
for name in model_names:
lowercase_ = model_name_to_prefix[name]
lowercase_ = [
name,
check[slow_tokenizers[prefix]],
check[fast_tokenizers[prefix]],
check[pt_models[prefix]],
check[tf_models[prefix]],
check[flax_models[prefix]],
]
table += "|" + "|".join([_center_text(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for l, w in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )] ) + "|\n"
return table
def A_ ( SCREAMING_SNAKE_CASE_=False ) ->Dict:
lowercase_ , lowercase_ , lowercase_ , lowercase_ = _find_text_in_file(
filename=os.path.join(SCREAMING_SNAKE_CASE_ , """index.md""" ) , start_prompt="""<!--This table is updated automatically from the auto modules""" , end_prompt="""<!-- End table-->""" , )
lowercase_ = get_model_table_from_auto_modules()
if current_table != new_table:
if overwrite:
with open(os.path.join(SCREAMING_SNAKE_CASE_ , """index.md""" ) , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(lines[:start_index] + [new_table] + lines[end_index:] )
else:
raise ValueError(
"""The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.""" )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
__snake_case = parser.parse_args()
check_model_table(args.fix_and_overwrite)
| 451
| 1
|
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
__magic_name__ : Any = getLogger(__name__)
__magic_name__ : List[Any] = '''cuda''' if torch.cuda.is_available() else '''cpu'''
def lowercase__ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 8 , _UpperCamelCase = DEFAULT_DEVICE , _UpperCamelCase=False , _UpperCamelCase="summarization" , _UpperCamelCase=None , **_UpperCamelCase , ) -> Dict:
"""simple docstring"""
UpperCamelCase = Path(_UpperCamelCase).open('w' , encoding='utf-8')
UpperCamelCase = str(_UpperCamelCase)
UpperCamelCase = AutoModelForSeqaSeqLM.from_pretrained(_UpperCamelCase).to(_UpperCamelCase)
if fpaa:
UpperCamelCase = model.half()
UpperCamelCase = AutoTokenizer.from_pretrained(_UpperCamelCase)
logger.info(F'Inferred tokenizer type: {tokenizer.__class__}') # if this is wrong, check config.model_type.
UpperCamelCase = time.time()
# update config with task specific params
use_task_specific_params(_UpperCamelCase , _UpperCamelCase)
if prefix is None:
UpperCamelCase = prefix or getattr(model.config , 'prefix' , '') or ''
for examples_chunk in tqdm(list(chunks(_UpperCamelCase , _UpperCamelCase))):
UpperCamelCase = [prefix + text for text in examples_chunk]
UpperCamelCase = tokenizer(_UpperCamelCase , return_tensors='pt' , truncation=_UpperCamelCase , padding='longest').to(_UpperCamelCase)
UpperCamelCase = model.generate(
input_ids=batch.input_ids , attention_mask=batch.attention_mask , **_UpperCamelCase , )
UpperCamelCase = tokenizer.batch_decode(_UpperCamelCase , skip_special_tokens=_UpperCamelCase , clean_up_tokenization_spaces=_UpperCamelCase)
for hypothesis in dec:
fout.write(hypothesis + '\n')
fout.flush()
fout.close()
UpperCamelCase = int(time.time() - start_time) # seconds
UpperCamelCase = len(_UpperCamelCase)
return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs , 4)}
def lowercase__ ( ) -> str:
"""simple docstring"""
return datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
def lowercase__ ( _UpperCamelCase=True) -> List[str]:
"""simple docstring"""
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument('model_name' , type=_UpperCamelCase , help='like facebook/bart-large-cnn,t5-base, etc.')
parser.add_argument('input_path' , type=_UpperCamelCase , help='like cnn_dm/test.source')
parser.add_argument('save_path' , type=_UpperCamelCase , help='where to save summaries')
parser.add_argument('--reference_path' , type=_UpperCamelCase , required=_UpperCamelCase , help='like cnn_dm/test.target')
parser.add_argument('--score_path' , type=_UpperCamelCase , required=_UpperCamelCase , default='metrics.json' , help='where to save metrics')
parser.add_argument('--device' , type=_UpperCamelCase , required=_UpperCamelCase , default=_UpperCamelCase , help='cuda, cuda:1, cpu etc.')
parser.add_argument(
'--prefix' , type=_UpperCamelCase , required=_UpperCamelCase , default=_UpperCamelCase , help='will be added to the beginning of src examples')
parser.add_argument('--task' , type=_UpperCamelCase , default='summarization' , help='used for task_specific_params + metrics')
parser.add_argument('--bs' , type=_UpperCamelCase , default=8 , required=_UpperCamelCase , help='batch size')
parser.add_argument(
'--n_obs' , type=_UpperCamelCase , default=-1 , required=_UpperCamelCase , help='How many observations. Defaults to all.')
parser.add_argument('--fp16' , action='store_true')
parser.add_argument('--dump-args' , action='store_true' , help='print the custom hparams with the results')
parser.add_argument(
'--info' , nargs='?' , type=_UpperCamelCase , const=datetime_now() , help=(
'use in conjunction w/ --dump-args to print with the results whatever other info you\'d like, e.g.'
' lang=en-ru. If no value is passed, the current datetime string will be used.'
) , )
# Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
UpperCamelCase , UpperCamelCase = parser.parse_known_args()
UpperCamelCase = parse_numeric_n_bool_cl_kwargs(_UpperCamelCase)
if parsed_args and verbose:
print(F'parsed the following generate kwargs: {parsed_args}')
UpperCamelCase = [' ' + x.rstrip() if 't5' in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
if args.n_obs > 0:
UpperCamelCase = examples[: args.n_obs]
Path(args.save_path).parent.mkdir(exist_ok=_UpperCamelCase)
if args.reference_path is None and Path(args.score_path).exists():
warnings.warn(F'score_path {args.score_path} will be overwritten unless you type ctrl-c.')
if args.device == "cpu" and args.fpaa:
# this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
raise ValueError('Can\'t mix --fp16 and --device cpu')
UpperCamelCase = generate_summaries_or_translations(
_UpperCamelCase , args.save_path , args.model_name , batch_size=args.bs , device=args.device , fpaa=args.fpaa , task=args.task , prefix=args.prefix , **_UpperCamelCase , )
if args.reference_path is None:
return {}
# Compute scores
UpperCamelCase = calculate_bleu if 'translation' in args.task else calculate_rouge
UpperCamelCase = [x.rstrip() for x in open(args.save_path).readlines()]
UpperCamelCase = [x.rstrip() for x in open(args.reference_path).readlines()][: len(_UpperCamelCase)]
UpperCamelCase = score_fn(_UpperCamelCase , _UpperCamelCase)
scores.update(_UpperCamelCase)
if args.dump_args:
scores.update(_UpperCamelCase)
if args.info:
UpperCamelCase = args.info
if verbose:
print(_UpperCamelCase)
if args.score_path is not None:
json.dump(_UpperCamelCase , open(args.score_path , 'w'))
return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
| 410
|
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class A__ ( __snake_case , unittest.TestCase ):
'''simple docstring'''
snake_case__ = MvpTokenizer
snake_case__ = MvpTokenizerFast
snake_case__ = True
snake_case__ = filter_roberta_detectors
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
super().setUp()
UpperCamelCase = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
UpperCamelCase = dict(zip(_SCREAMING_SNAKE_CASE , range(len(_SCREAMING_SNAKE_CASE ) ) ) )
UpperCamelCase = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
UpperCamelCase = {'unk_token': '<unk>'}
UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(_SCREAMING_SNAKE_CASE ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(_SCREAMING_SNAKE_CASE ) )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , **_SCREAMING_SNAKE_CASE : Union[str, Any] ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self : int , **_SCREAMING_SNAKE_CASE : Optional[Any] ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self : Any , _SCREAMING_SNAKE_CASE : List[Any] ):
"""simple docstring"""
return "lower newer", "lower newer"
@cached_property
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
return MvpTokenizer.from_pretrained('RUCAIBox/mvp' )
@cached_property
def _SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
return MvpTokenizerFast.from_pretrained('RUCAIBox/mvp' )
@require_torch
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
UpperCamelCase = [0, 250, 251, 1_7818, 13, 3_9186, 1938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCamelCase = tokenizer(_SCREAMING_SNAKE_CASE , max_length=len(_SCREAMING_SNAKE_CASE ) , padding=_SCREAMING_SNAKE_CASE , return_tensors='pt' )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
UpperCamelCase = batch.input_ids.tolist()[0]
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Test that special tokens are reset
@require_torch
def _SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
UpperCamelCase = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCamelCase = tokenizer(_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , return_tensors='pt' )
# check if input_ids are returned and no labels
self.assertIn('input_ids' , _SCREAMING_SNAKE_CASE )
self.assertIn('attention_mask' , _SCREAMING_SNAKE_CASE )
self.assertNotIn('labels' , _SCREAMING_SNAKE_CASE )
self.assertNotIn('decoder_attention_mask' , _SCREAMING_SNAKE_CASE )
@require_torch
def _SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
UpperCamelCase = [
'Summary of the text.',
'Another summary.',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCamelCase = tokenizer(text_target=_SCREAMING_SNAKE_CASE , max_length=32 , padding='max_length' , return_tensors='pt' )
self.assertEqual(32 , targets['input_ids'].shape[1] )
@require_torch
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCamelCase = tokenizer(
['I am a small frog' * 1024, 'I am a small frog'] , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , return_tensors='pt' )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual(batch.input_ids.shape , (2, 1024) )
@require_torch
def _SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
UpperCamelCase = ['A long paragraph for summarization.']
UpperCamelCase = [
'Summary of the text.',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCamelCase = tokenizer(_SCREAMING_SNAKE_CASE , text_target=_SCREAMING_SNAKE_CASE , return_tensors='pt' )
UpperCamelCase = inputs['input_ids']
UpperCamelCase = inputs['labels']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
def _SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
pass
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
UpperCamelCase = self.rust_tokenizer_class.from_pretrained(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
UpperCamelCase = self.tokenizer_class.from_pretrained(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
UpperCamelCase = 'A, <mask> AllenNLP sentence.'
UpperCamelCase = tokenizer_r.encode_plus(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_token_type_ids=_SCREAMING_SNAKE_CASE )
UpperCamelCase = tokenizer_p.encode_plus(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_token_type_ids=_SCREAMING_SNAKE_CASE )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )
UpperCamelCase = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
UpperCamelCase = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
# Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p['input_ids'] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(
_SCREAMING_SNAKE_CASE , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
_SCREAMING_SNAKE_CASE , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
| 410
| 1
|
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
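# Added note (rough summary of `find_executable_batch_size`, see
# `accelerate.utils` for the real implementation): the decorator re-runs the
# wrapped function whenever a CUDA out-of-memory error is raised, halving its
# `batch_size` argument each time (e.g. 16 -> 8 -> 4 -> ...) until it succeeds.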
A_ : Union[str, Any] = 16
A_ : Optional[Any] = 32
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 1_6 ) -> Union[str, Any]:
'''simple docstring'''
__UpperCAmelCase = AutoTokenizer.from_pretrained('''bert-base-cased''' )
__UpperCAmelCase = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(SCREAMING_SNAKE_CASE ):
# max_length=None => use the model max length (it's actually the default)
__UpperCAmelCase = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
__UpperCAmelCase = datasets.map(
SCREAMING_SNAKE_CASE , batched=SCREAMING_SNAKE_CASE , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__UpperCAmelCase = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(SCREAMING_SNAKE_CASE ):
# On TPU it's best to pad everything to the same length or training will be very slow.
__UpperCAmelCase = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
__UpperCAmelCase = 1_6
elif accelerator.mixed_precision != "no":
__UpperCAmelCase = 8
else:
__UpperCAmelCase = None
return tokenizer.pad(
SCREAMING_SNAKE_CASE , padding='''longest''' , max_length=SCREAMING_SNAKE_CASE , pad_to_multiple_of=SCREAMING_SNAKE_CASE , return_tensors='''pt''' , )
# Instantiate dataloaders.
__UpperCAmelCase = DataLoader(
tokenized_datasets['''train'''] , shuffle=SCREAMING_SNAKE_CASE , collate_fn=SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE )
__UpperCAmelCase = DataLoader(
tokenized_datasets['''validation'''] , shuffle=SCREAMING_SNAKE_CASE , collate_fn=SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
A_ : Any = mocked_dataloaders # noqa: F811
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Tuple:
'''simple docstring'''
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , SCREAMING_SNAKE_CASE ) == "1":
__UpperCAmelCase = 2
# Initialize accelerator
__UpperCAmelCase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__UpperCAmelCase = config['''lr''']
__UpperCAmelCase = int(config['''num_epochs'''] )
__UpperCAmelCase = int(config['''seed'''] )
__UpperCAmelCase = int(config['''batch_size'''] )
__UpperCAmelCase = evaluate.load('''glue''' , '''mrpc''' )
# New Code #
# We can now define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
@find_executable_batch_size(starting_batch_size=SCREAMING_SNAKE_CASE )
def inner_training_loop(SCREAMING_SNAKE_CASE ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(SCREAMING_SNAKE_CASE )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__UpperCAmelCase = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=SCREAMING_SNAKE_CASE )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
__UpperCAmelCase = model.to(accelerator.device )
# Instantiate optimizer
__UpperCAmelCase = AdamW(params=model.parameters() , lr=SCREAMING_SNAKE_CASE )
__UpperCAmelCase , __UpperCAmelCase = get_dataloaders(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Instantiate scheduler
__UpperCAmelCase = get_linear_schedule_with_warmup(
optimizer=SCREAMING_SNAKE_CASE , num_warmup_steps=1_0_0 , num_training_steps=(len(SCREAMING_SNAKE_CASE ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = accelerator.prepare(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Now we train the model
for epoch in range(SCREAMING_SNAKE_CASE ):
model.train()
for step, batch in enumerate(SCREAMING_SNAKE_CASE ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
__UpperCAmelCase = model(**SCREAMING_SNAKE_CASE )
__UpperCAmelCase = outputs.loss
accelerator.backward(SCREAMING_SNAKE_CASE )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(SCREAMING_SNAKE_CASE ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__UpperCAmelCase = model(**SCREAMING_SNAKE_CASE )
__UpperCAmelCase = outputs.logits.argmax(dim=-1 )
__UpperCAmelCase , __UpperCAmelCase = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=SCREAMING_SNAKE_CASE , references=SCREAMING_SNAKE_CASE , )
__UpperCAmelCase = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' , SCREAMING_SNAKE_CASE )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def __a ( ) -> Optional[Any]:
'''simple docstring'''
__UpperCAmelCase = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
'''--mixed_precision''' , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
''' between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10'''
''' and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
__UpperCAmelCase = parser.parse_args()
__UpperCAmelCase = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 4_2, '''batch_size''': 1_6}
training_function(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
| 303
|
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
A_ : Optional[int] = abspath(join(dirname(dirname(dirname(__file__))), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def __a ( SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(SCREAMING_SNAKE_CASE )
def __a ( SCREAMING_SNAKE_CASE ) -> Tuple:
'''simple docstring'''
from transformers.testing_utils import pytest_terminal_summary_main
__UpperCAmelCase = terminalreporter.config.getoption('''--make-reports''' )
if make_reports:
pytest_terminal_summary_main(SCREAMING_SNAKE_CASE , id=SCREAMING_SNAKE_CASE )
| 303
| 1
|
def solution ( numerator : int = 3 , denominator : int = 7 , limit : int = 100_0000 ) -> int:
max_numerator = 0
max_denominator = 1
for current_denominator in range(1 , limit + 1):
current_numerator = current_denominator * numerator // denominator
if current_denominator % denominator == 0:
current_numerator -= 1
if current_numerator * max_denominator > current_denominator * max_numerator:
max_numerator = current_numerator
max_denominator = current_denominator
return max_numerator
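# Worked example (added note, from the Project Euler 71 statement): with
# numerator=3, denominator=7 and limit=8 the fraction immediately to the left
# of 3/7 is 2/5, so solution(3, 7, 8) returns 2.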
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=1_000_000))
| 155
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase__ ( self : Dict ):
__snake_case: Dict = tempfile.mkdtemp()
__snake_case: Tuple = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""的""",
"""价""",
"""格""",
"""是""",
"""15""",
"""便""",
"""alex""",
"""##andra""",
""",""",
"""。""",
"""-""",
"""t""",
"""shirt""",
]
__snake_case: List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
__snake_case: int = {
"""do_resize""": True,
"""size""": {"""height""": 224, """width""": 224},
"""do_center_crop""": True,
"""crop_size""": {"""height""": 18, """width""": 18},
"""do_normalize""": True,
"""image_mean""": [0.4814_5466, 0.457_8275, 0.4082_1073],
"""image_std""": [0.2686_2954, 0.2613_0258, 0.2757_7711],
"""do_convert_rgb""": True,
}
__snake_case: List[Any] = os.path.join(self.tmpdirname , FEATURE_EXTRACTOR_NAME )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(A , A )
def UpperCAmelCase__ ( self : Optional[int] , **A : int ):
return BertTokenizer.from_pretrained(self.tmpdirname , **A )
def UpperCAmelCase__ ( self : Optional[Any] , **A : Optional[int] ):
return BertTokenizerFast.from_pretrained(self.tmpdirname , **A )
def UpperCAmelCase__ ( self : Dict , **A : Any ):
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **A )
def UpperCAmelCase__ ( self : str ):
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: int = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
__snake_case: str = [Image.fromarray(np.moveaxis(A , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase__ ( self : List[str] ):
__snake_case: List[Any] = self.get_tokenizer()
__snake_case: Dict = self.get_rust_tokenizer()
__snake_case: Union[str, Any] = self.get_image_processor()
__snake_case: Union[str, Any] = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
processor_slow.save_pretrained(self.tmpdirname )
__snake_case: List[str] = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=A )
__snake_case: List[Any] = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
processor_fast.save_pretrained(self.tmpdirname )
__snake_case: Dict = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , A )
self.assertIsInstance(processor_fast.tokenizer , A )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , A )
self.assertIsInstance(processor_fast.image_processor , A )
def UpperCAmelCase__ ( self : Optional[Any] ):
__snake_case: Union[str, Any] = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__snake_case: Optional[Any] = self.get_tokenizer(cls_token="""(CLS)""" , sep_token="""(SEP)""" )
__snake_case: Dict = self.get_image_processor(do_normalize=A )
__snake_case: int = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token="""(CLS)""" , sep_token="""(SEP)""" , do_normalize=A )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , A )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , A )
def UpperCAmelCase__ ( self : Union[str, Any] ):
__snake_case: Tuple = self.get_image_processor()
__snake_case: Optional[int] = self.get_tokenizer()
__snake_case: List[str] = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
__snake_case: List[Any] = self.prepare_image_inputs()
__snake_case: List[str] = image_processor(A , return_tensors="""np""" )
__snake_case: Optional[Any] = processor(images=A , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCAmelCase__ ( self : Tuple ):
__snake_case: str = self.get_image_processor()
__snake_case: Optional[int] = self.get_tokenizer()
__snake_case: Any = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
__snake_case: List[str] = """Alexandra,T-shirt的价格是15便士。"""
__snake_case: str = processor(text=A )
__snake_case: int = tokenizer(A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCAmelCase__ ( self : str ):
__snake_case: List[Any] = self.get_image_processor()
__snake_case: Union[str, Any] = self.get_tokenizer()
__snake_case: List[Any] = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
__snake_case: Tuple = """Alexandra,T-shirt的价格是15便士。"""
__snake_case: List[Any] = self.prepare_image_inputs()
__snake_case: Optional[Any] = processor(text=A , images=A )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(A ):
processor()
def UpperCAmelCase__ ( self : Dict ):
__snake_case: List[str] = self.get_image_processor()
__snake_case: Optional[int] = self.get_tokenizer()
__snake_case: int = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
__snake_case: Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__snake_case: int = processor.batch_decode(A )
__snake_case: Union[str, Any] = tokenizer.batch_decode(A )
self.assertListEqual(A , A )
def UpperCAmelCase__ ( self : int ):
__snake_case: int = self.get_image_processor()
__snake_case: Optional[int] = self.get_tokenizer()
__snake_case: Union[str, Any] = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
__snake_case: int = """Alexandra,T-shirt的价格是15便士。"""
__snake_case: List[str] = self.prepare_image_inputs()
__snake_case: List[str] = processor(text=A , images=A )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 155
| 1
|
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)


MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


@dataclass
class ModelArguments:
    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Leave None if you want to train a model from"
                " scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    train_data_file: Optional[str] = field(
        default=None, metadata={"help": "The input training data file (a text file)."}
    )
    train_data_files: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The input training data files (multiple files in glob format). "
                "Very often splitting large files to smaller files can prevent tokenizer going out of memory"
            )
        },
    )
    eval_data_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word mask in Chinese."},
    )
    eval_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."},
    )
    line_by_line: bool = field(
        default=False,
        metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
    )
    mlm: bool = field(
        default=False, metadata={"help": "Train with masked-language modeling loss instead of language modeling."}
    )
    whole_word_mask: bool = field(default=False, metadata={"help": "Whether or not to use whole word mask."})
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    plm_probability: float = field(
        default=1 / 6,
        metadata={
            "help": (
                "Ratio of length of a span of masked tokens to surrounding context length for permutation language"
                " modeling."
            )
        },
    )
    max_span_length: int = field(
        default=5, metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."}
    )
    block_size: int = field(
        default=-1,
        metadata={
            "help": (
                "Optional input sequence length after tokenization. "
                "The training dataset will be truncated in blocks of this size for training. "
                "Default to the model max input length for single sentence inputs (take into account special tokens)."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


def get_dataset(
    args: DataTrainingArguments,
    tokenizer: PreTrainedTokenizer,
    evaluate: bool = False,
    cache_dir: Optional[str] = None,
):
    def _dataset(file_path, ref_path=None):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError("You need to set whole word masking and mlm to True for Chinese Whole Word Mask")
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer, file_path=file_path, block_size=args.block_size, ref_path=ref_path
                )
            return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size)
        else:
            return TextDataset(
                tokenizer=tokenizer,
                file_path=file_path,
                block_size=args.block_size,
                overwrite_cache=args.overwrite_cache,
                cache_dir=cache_dir,
            )

    if evaluate:
        return _dataset(args.eval_data_file, args.eval_ref_file)
    elif args.train_data_files:
        return ConcatDataset([_dataset(f) for f in glob(args.train_data_files)])
    else:
        return _dataset(args.train_data_file, args.train_ref_file)
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if data_args.eval_data_file is None and training_args.do_eval:
        raise ValueError(
            "Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file "
            "or remove the --do_eval argument."
        )

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")

    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"
            " script, save it, and load it from here, using --tokenizer_name"
        )

    if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelWithLMHead.from_config(config)

    model.resize_token_embeddings(len(tokenizer))

    if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
        raise ValueError(
            "BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the "
            "--mlm flag (masked language modeling)."
        )

    if data_args.block_size <= 0:
        data_args.block_size = tokenizer.max_len
        # Our input block size will be the max possible for the model
    else:
        data_args.block_size = min(data_args.block_size, tokenizer.max_len)

    # Get datasets
    train_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir) if training_args.do_train else None
    )
    eval_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, evaluate=True, cache_dir=model_args.cache_dir)
        if training_args.do_eval
        else None
    )
    if config.model_type == "xlnet":
        data_collator = DataCollatorForPermutationLanguageModeling(
            tokenizer=tokenizer,
            plm_probability=data_args.plm_probability,
            max_span_length=data_args.max_span_length,
        )
    else:
        if data_args.mlm and data_args.whole_word_mask:
            data_collator = DataCollatorForWholeWordMask(
                tokenizer=tokenizer, mlm_probability=data_args.mlm_probability
            )
        else:
            data_collator = DataCollatorForLanguageModeling(
                tokenizer=tokenizer, mlm=data_args.mlm, mlm_probability=data_args.mlm_probability
            )

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        data_collator=data_collator,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        prediction_loss_only=True,
    )

    # Training
    if training_args.do_train:
        model_path = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path)
            else None
        )
        trainer.train(model_path=model_path)
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        result = {"perplexity": perplexity}

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_lm.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key in sorted(result.keys()):
                    logger.info("  %s = %s", key, str(result[key]))
                    writer.write("%s = %s\n" % (key, str(result[key])))

        results.update(result)

    return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
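# A representative invocation of this script (file paths and the model name are
# placeholders, shown only to illustrate how the dataclass fields above map to
# CLI flags; the Chinese whole-word-mask path needs --line_by_line plus a ref file):
#
#   python run_language_modeling.py \
#       --output_dir ./lm_out \
#       --model_name_or_path bert-base-chinese \
#       --mlm --whole_word_mask \
#       --train_data_file ./train.txt \
#       --train_ref_file ./train_ref.txt \
#       --line_by_line \
#       --do_train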
import sys
from collections import defaultdict
class Heap:
    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, temp1 = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, temp1

                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start])
                )
                self.set_position(positions[start], temp)

                self.top_to_bottom(heap, smallest_child, size, positions)

    # Update function if the value of any node in the min-heap decreases
    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp
def prisms_algorithm(adjacency_list):
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex

    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges
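# A tiny illustrative run of the function above (a 3-vertex triangle graph;
# this helper is not part of the original module and is never called here):
def _demo_prisms_algorithm():
    adjacency_list = defaultdict(list)
    for u, v, w in [(0, 1, 1), (1, 2, 2), (0, 2, 3)]:
        adjacency_list[u].append([v, w])
        adjacency_list[v].append([u, w])
    # The minimum spanning tree keeps the two cheapest edges: (0, 1) and (1, 2).
    return prisms_algorithm(adjacency_list)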
if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
'''simple docstring'''
from __future__ import annotations
from math import pow, sqrt
def electrical_impedance(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")
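# A quick sanity sketch for the function above (illustrative values; this
# helper is not part of the original module and is never called here):
def _demo_electrical_impedance():
    # A 3-4-5 triangle: R = 3, X = 4 gives |Z| = 5.
    assert electrical_impedance(3, 4, 0) == {"impedance": 5.0}
    # Solving for reactance instead: |Z| = 5, R = 3 -> X = 4.
    assert electrical_impedance(3, 0, 5) == {"reactance": 4.0}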
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
from __future__ import annotations
def extended_euclid(a: int, b: int) -> tuple[int, int]:
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


def invert_modulo(a: int, n: int) -> int:
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
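# A small worked check of the helpers above (illustrative; this helper is not
# part of the original module and is never called here):
def _demo_chinese_remainder_theorem():
    # Find n with n % 5 == 1 and n % 7 == 3; both solvers agree on 31.
    assert chinese_remainder_theorem(5, 1, 7, 3) == 31
    assert chinese_remainder_theorem2(5, 1, 7, 3) == 31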
if __name__ == "__main__":
from doctest import testmod
testmod(name='chinese_remainder_theorem', verbose=True)
testmod(name='chinese_remainder_theorem2', verbose=True)
testmod(name='invert_modulo', verbose=True)
testmod(name='extended_euclid', verbose=True)
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class LEDTokenizer(PreTrainedTokenizer):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self , _lowercase , _lowercase , _lowercase="replace" , _lowercase="<s>" , _lowercase="</s>" , _lowercase="</s>" , _lowercase="<s>" , _lowercase="<unk>" , _lowercase="<pad>" , _lowercase="<mask>" , _lowercase=False , **_lowercase , ):
"""simple docstring"""
_lowerCAmelCase = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else bos_token
_lowerCAmelCase = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else eos_token
_lowerCAmelCase = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else sep_token
_lowerCAmelCase = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else cls_token
_lowerCAmelCase = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else unk_token
_lowerCAmelCase = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
_lowerCAmelCase = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else mask_token
super().__init__(
errors=_lowercase , bos_token=_lowercase , eos_token=_lowercase , unk_token=_lowercase , sep_token=_lowercase , cls_token=_lowercase , pad_token=_lowercase , mask_token=_lowercase , add_prefix_space=_lowercase , **_lowercase , )
with open(_lowercase , encoding="""utf-8""" ) as vocab_handle:
_lowerCAmelCase = json.load(_lowercase )
_lowerCAmelCase = {v: k for k, v in self.encoder.items()}
_lowerCAmelCase = errors # how to handle errors in decoding
_lowerCAmelCase = bytes_to_unicode()
_lowerCAmelCase = {v: k for k, v in self.byte_encoder.items()}
with open(_lowercase , encoding="""utf-8""" ) as merges_handle:
_lowerCAmelCase = merges_handle.read().split("""\n""" )[1:-1]
_lowerCAmelCase = [tuple(merge.split() ) for merge in bpe_merges]
_lowerCAmelCase = dict(zip(_lowercase , range(len(_lowercase ) ) ) )
_lowerCAmelCase = {}
_lowerCAmelCase = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
_lowerCAmelCase = re.compile(R"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def _lowercase ( self ):
"""simple docstring"""
return len(self.encoder )
def _lowercase ( self ):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def _lowercase ( self , _lowercase ):
"""simple docstring"""
if token in self.cache:
return self.cache[token]
_lowerCAmelCase = tuple(_lowercase )
_lowerCAmelCase = get_pairs(_lowercase )
if not pairs:
return token
while True:
_lowerCAmelCase = min(_lowercase , key=lambda _lowercase : self.bpe_ranks.get(_lowercase , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
_lowerCAmelCase , _lowerCAmelCase = bigram
_lowerCAmelCase = []
_lowerCAmelCase = 0
while i < len(_lowercase ):
try:
_lowerCAmelCase = word.index(_lowercase , _lowercase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_lowerCAmelCase = j
if word[i] == first and i < len(_lowercase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_lowerCAmelCase = tuple(_lowercase )
_lowerCAmelCase = new_word
if len(_lowercase ) == 1:
break
else:
_lowerCAmelCase = get_pairs(_lowercase )
_lowerCAmelCase = """ """.join(_lowercase )
_lowerCAmelCase = word
return word
def _lowercase ( self , _lowercase ):
"""simple docstring"""
_lowerCAmelCase = []
for token in re.findall(self.pat , _lowercase ):
_lowerCAmelCase = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_lowercase ).split(""" """ ) )
return bpe_tokens
def _lowercase ( self , _lowercase ):
"""simple docstring"""
return self.encoder.get(_lowercase , self.encoder.get(self.unk_token ) )
def _lowercase ( self , _lowercase ):
"""simple docstring"""
return self.decoder.get(_lowercase )
def _lowercase ( self , _lowercase ):
"""simple docstring"""
_lowerCAmelCase = """""".join(_lowercase )
_lowerCAmelCase = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors )
return text
def _lowercase ( self , _lowercase , _lowercase = None ):
"""simple docstring"""
if not os.path.isdir(_lowercase ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
_lowerCAmelCase = os.path.join(
_lowercase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
_lowerCAmelCase = os.path.join(
_lowercase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(_lowercase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=_lowercase , ensure_ascii=_lowercase ) + """\n""" )
_lowerCAmelCase = 0
with open(_lowercase , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
if index != token_index:
logger.warning(
F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
""" Please check that the tokenizer is not corrupted!""" )
_lowerCAmelCase = token_index
writer.write(""" """.join(_lowercase ) + """\n""" )
index += 1
return vocab_file, merge_file
def _lowercase ( self , _lowercase , _lowercase = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_lowerCAmelCase = [self.cls_token_id]
_lowerCAmelCase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _lowercase ( self , _lowercase , _lowercase = None , _lowercase = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowercase , token_ids_a=_lowercase , already_has_special_tokens=_lowercase )
if token_ids_a is None:
return [1] + ([0] * len(_lowercase )) + [1]
return [1] + ([0] * len(_lowercase )) + [1, 1] + ([0] * len(_lowercase )) + [1]
def _lowercase ( self , _lowercase , _lowercase = None ):
"""simple docstring"""
_lowerCAmelCase = [self.sep_token_id]
_lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _lowercase ( self , _lowercase , _lowercase=False , **_lowercase ):
"""simple docstring"""
_lowerCAmelCase = kwargs.pop("""add_prefix_space""" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(_lowercase ) > 0 and not text[0].isspace()):
_lowerCAmelCase = """ """ + text
return (text, kwargs)
def _lowercase ( self , _lowercase , _lowercase = None , _lowercase = PaddingStrategy.DO_NOT_PAD , _lowercase = None , _lowercase = None , ):
"""simple docstring"""
_lowerCAmelCase = super()._pad(
encoded_inputs=_lowercase , max_length=_lowercase , padding_strategy=_lowercase , pad_to_multiple_of=_lowercase , return_attention_mask=_lowercase , )
# Load from model defaults
if return_attention_mask is None:
_lowerCAmelCase = """attention_mask""" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
_lowerCAmelCase = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
_lowerCAmelCase = len(encoded_inputs["""global_attention_mask"""] ) != len(_lowercase )
if needs_to_be_padded:
_lowerCAmelCase = len(_lowercase ) - len(encoded_inputs["""global_attention_mask"""] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
_lowerCAmelCase = (
encoded_inputs["""global_attention_mask"""] + [-1] * difference
)
elif self.padding_side == "left":
_lowerCAmelCase = [-1] * difference + encoded_inputs[
"""global_attention_mask"""
]
else:
raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
return encoded_inputs
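# A small usage sketch for the tokenizer above (illustrative; this helper is
# never called here and downloads the checkpoint named in the URL maps at the
# top of this file when actually invoked):
def _demo_led_tokenizer():
    # Assumes the packaged class from the transformers library rather than the
    # local copy in this file.
    from transformers import LEDTokenizer as PackagedLEDTokenizer

    tok = PackagedLEDTokenizer.from_pretrained("allenai/led-base-16384")
    enc = tok("Long documents go here.", return_tensors="pt")
    # `_pad` above additionally pads `global_attention_mask` with -1 when present.
    return enc["input_ids"].shape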
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
_UpperCamelCase = logging.getLogger(__name__)
class SummarizationModule(BaseTransformer):
    """simple docstring"""

    mode = "summarization"
    loss_names = ["loss"]
    metric_names = ROUGE_KEYS
    default_val_metric = "rouge2"
def __init__(self , __a , **__a ) -> Optional[int]:
"""simple docstring"""
if hparams.sortish_sampler and hparams.gpus > 1:
UpperCAmelCase__ = False
elif hparams.max_tokens_per_batch is not None:
if hparams.gpus > 1:
raise NotImplementedError('Dynamic Batch size does not work for multi-gpu training' )
if hparams.sortish_sampler:
raise ValueError('--sortish_sampler and --max_tokens_per_batch may not be used simultaneously' )
super().__init__(__a , num_labels=__a , mode=self.mode , **__a )
use_task_specific_params(self.model , 'summarization' )
save_git_info(self.hparams.output_dir )
UpperCAmelCase__ = Path(self.output_dir ) / 'metrics.json'
UpperCAmelCase__ = Path(self.output_dir ) / 'hparams.pkl'
pickle_save(self.hparams , self.hparams_save_path )
UpperCAmelCase__ = 0
UpperCAmelCase__ = defaultdict(__a )
UpperCAmelCase__ = self.config.model_type
UpperCAmelCase__ = self.config.tgt_vocab_size if self.model_type == 'fsmt' else self.config.vocab_size
UpperCAmelCase__ = {
"data_dir": self.hparams.data_dir,
"max_source_length": self.hparams.max_source_length,
"prefix": self.model.config.prefix or "",
}
UpperCAmelCase__ = {
'train': self.hparams.n_train,
'val': self.hparams.n_val,
'test': self.hparams.n_test,
}
UpperCAmelCase__ = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
UpperCAmelCase__ = {
'train': self.hparams.max_target_length,
'val': self.hparams.val_max_target_length,
'test': self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], F"target_lens: {self.target_lens}"
assert self.target_lens["train"] <= self.target_lens["test"], F"target_lens: {self.target_lens}"
if self.hparams.freeze_embeds:
freeze_embeds(self.model )
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder() )
assert_all_frozen(self.model.get_encoder() )
UpperCAmelCase__ = get_git_info()['repo_sha']
UpperCAmelCase__ = hparams.num_workers
UpperCAmelCase__ = None # default to config
if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , __a ):
UpperCAmelCase__ = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
UpperCAmelCase__ = self.decoder_start_token_id
UpperCAmelCase__ = (
SeqaSeqDataset if hasattr(self.tokenizer , 'prepare_seq2seq_batch' ) else LegacySeqaSeqDataset
)
UpperCAmelCase__ = False
UpperCAmelCase__ = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
UpperCAmelCase__ = self.hparams.eval_max_gen_length
else:
UpperCAmelCase__ = self.model.config.max_length
UpperCAmelCase__ = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def UpperCamelCase__ (self , __a ) -> Dict[str, List[str]]:
"""simple docstring"""
UpperCAmelCase__ = {
k: self.tokenizer.batch_decode(v.tolist() ) if 'mask' not in k else v.shape for k, v in batch.items()
}
save_json(__a , Path(self.output_dir ) / 'text_batch.json' )
save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / 'tok_batch.json' )
UpperCAmelCase__ = True
return readable_batch
def UpperCamelCase__ (self , __a , **__a ) -> int:
"""simple docstring"""
return self.model(__a , **__a )
def UpperCamelCase__ (self , __a ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ = self.tokenizer.batch_decode(
__a , skip_special_tokens=__a , clean_up_tokenization_spaces=__a )
return lmap(str.strip , __a )
def UpperCamelCase__ (self , __a ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = self.tokenizer.pad_token_id
UpperCAmelCase__ , UpperCAmelCase__ = batch['input_ids'], batch['attention_mask']
UpperCAmelCase__ = batch['labels']
if isinstance(self.model , __a ):
UpperCAmelCase__ = self.model._shift_right(__a )
else:
UpperCAmelCase__ = shift_tokens_right(__a , __a )
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
UpperCAmelCase__ = decoder_input_ids
self.save_readable_batch(__a )
UpperCAmelCase__ = self(__a , attention_mask=__a , decoder_input_ids=__a , use_cache=__a )
UpperCAmelCase__ = outputs['logits']
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
UpperCAmelCase__ = nn.CrossEntropyLoss(ignore_index=__a )
assert lm_logits.shape[-1] == self.vocab_size
UpperCAmelCase__ = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) )
else:
UpperCAmelCase__ = nn.functional.log_softmax(__a , dim=-1 )
UpperCAmelCase__ , UpperCAmelCase__ = label_smoothed_nll_loss(
__a , __a , self.hparams.label_smoothing , ignore_index=__a )
return (loss,)
@property
def UpperCamelCase__ (self ) -> int:
"""simple docstring"""
return self.tokenizer.pad_token_id
def UpperCamelCase__ (self , __a , __a ) -> Dict:
"""simple docstring"""
UpperCAmelCase__ = self._step(__a )
UpperCAmelCase__ = dict(zip(self.loss_names , __a ) )
# tokens per batch
UpperCAmelCase__ = batch['input_ids'].ne(self.pad ).sum() + batch['labels'].ne(self.pad ).sum()
UpperCAmelCase__ = batch['input_ids'].shape[0]
UpperCAmelCase__ = batch['input_ids'].eq(self.pad ).sum()
UpperCAmelCase__ = batch['input_ids'].eq(self.pad ).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def UpperCamelCase__ (self , __a , __a ) -> Dict:
"""simple docstring"""
return self._generative_step(__a )
def UpperCamelCase__ (self , __a , __a="val" ) -> Dict:
"""simple docstring"""
self.step_count += 1
UpperCAmelCase__ = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
UpperCAmelCase__ = losses['loss']
UpperCAmelCase__ = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ['gen_time', 'gen_len']
}
UpperCAmelCase__ = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
UpperCAmelCase__ = torch.tensor(__a ).type_as(__a )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(__a )
UpperCAmelCase__ = {F"{prefix}_avg_{k}": x for k, x in losses.items()}
UpperCAmelCase__ = self.step_count
self.metrics[prefix].append(__a ) # callback writes this to self.metrics_save_path
UpperCAmelCase__ = flatten_list([x['preds'] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
F"{prefix}_loss": loss,
F"{prefix}_{self.val_metric}": metric_tensor,
}
def UpperCamelCase__ (self , __a , __a ) -> Dict:
"""simple docstring"""
return calculate_rouge(__a , __a )
def UpperCamelCase__ (self , __a ) -> dict:
"""simple docstring"""
UpperCAmelCase__ = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
UpperCAmelCase__ = self.model.generate(
batch['input_ids'] , attention_mask=batch['attention_mask'] , use_cache=__a , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
UpperCAmelCase__ = (time.time() - ta) / batch['input_ids'].shape[0]
UpperCAmelCase__ = self.ids_to_clean_text(__a )
UpperCAmelCase__ = self.ids_to_clean_text(batch['labels'] )
UpperCAmelCase__ = self._step(__a )
UpperCAmelCase__ = dict(zip(self.loss_names , __a ) )
UpperCAmelCase__ = self.calc_generative_metrics(__a , __a )
UpperCAmelCase__ = np.mean(lmap(__a , __a ) )
base_metrics.update(gen_time=__a , gen_len=__a , preds=__a , target=__a , **__a )
return base_metrics
def UpperCamelCase__ (self , __a , __a ) -> int:
"""simple docstring"""
return self._generative_step(__a )
def UpperCamelCase__ (self , __a ) -> int:
"""simple docstring"""
return self.validation_epoch_end(__a , prefix='test' )
def UpperCamelCase__ (self , __a ) -> SeqaSeqDataset:
"""simple docstring"""
UpperCAmelCase__ = self.n_obs[type_path]
UpperCAmelCase__ = self.target_lens[type_path]
UpperCAmelCase__ = self.dataset_class(
self.tokenizer , type_path=__a , n_obs=__a , max_target_length=__a , **self.dataset_kwargs , )
return dataset
def UpperCamelCase__ (self , __a , __a , __a = False ) -> DataLoader:
"""simple docstring"""
UpperCAmelCase__ = self.get_dataset(__a )
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
UpperCAmelCase__ = dataset.make_sortish_sampler(__a , distributed=self.hparams.gpus > 1 )
return DataLoader(
__a , batch_size=__a , collate_fn=dataset.collate_fn , shuffle=__a , num_workers=self.num_workers , sampler=__a , )
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
UpperCAmelCase__ = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 )
return DataLoader(
__a , batch_sampler=__a , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
else:
return DataLoader(
__a , batch_size=__a , collate_fn=dataset.collate_fn , shuffle=__a , num_workers=self.num_workers , sampler=__a , )
def UpperCamelCase__ (self ) -> DataLoader:
"""simple docstring"""
UpperCAmelCase__ = self.get_dataloader('train' , batch_size=self.hparams.train_batch_size , shuffle=__a )
return dataloader
def UpperCamelCase__ (self ) -> DataLoader:
"""simple docstring"""
return self.get_dataloader('val' , batch_size=self.hparams.eval_batch_size )
def UpperCamelCase__ (self ) -> DataLoader:
"""simple docstring"""
return self.get_dataloader('test' , batch_size=self.hparams.eval_batch_size )
@staticmethod
def UpperCamelCase__ (__a , __a ) -> Dict:
"""simple docstring"""
BaseTransformer.add_model_specific_args(__a , __a )
add_generic_args(__a , __a )
parser.add_argument(
'--max_source_length' , default=1024 , type=__a , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--max_target_length' , default=56 , type=__a , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--val_max_target_length' , default=142 , type=__a , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--test_max_target_length' , default=142 , type=__a , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument('--freeze_encoder' , action='store_true' )
parser.add_argument('--freeze_embeds' , action='store_true' )
parser.add_argument('--sortish_sampler' , action='store_true' , default=__a )
parser.add_argument('--overwrite_output_dir' , action='store_true' , default=__a )
parser.add_argument('--max_tokens_per_batch' , type=__a , default=__a )
parser.add_argument('--logger_name' , type=__a , choices=['default', 'wandb', 'wandb_shared'] , default='default' )
parser.add_argument('--n_train' , type=__a , default=-1 , required=__a , help='# examples. -1 means use all.' )
parser.add_argument('--n_val' , type=__a , default=500 , required=__a , help='# examples. -1 means use all.' )
parser.add_argument('--n_test' , type=__a , default=-1 , required=__a , help='# examples. -1 means use all.' )
parser.add_argument(
'--task' , type=__a , default='summarization' , required=__a , help='# examples. -1 means use all.' )
parser.add_argument('--label_smoothing' , type=__a , default=0.0 , required=__a )
parser.add_argument('--src_lang' , type=__a , default='' , required=__a )
parser.add_argument('--tgt_lang' , type=__a , default='' , required=__a )
parser.add_argument('--eval_beams' , type=__a , default=__a , required=__a )
parser.add_argument(
'--val_metric' , type=__a , default=__a , required=__a , choices=['bleu', 'rouge2', 'loss', None] )
parser.add_argument('--eval_max_gen_length' , type=__a , default=__a , help='never generate more than n tokens' )
parser.add_argument('--save_top_k' , type=__a , default=1 , required=__a , help='How many checkpoints to save' )
parser.add_argument(
'--early_stopping_patience' , type=__a , default=-1 , required=__a , help=(
'-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So'
' val_check_interval will effect it.'
) , )
return parser
class TranslationModule(SummarizationModule):
    """simple docstring"""

    mode = "translation"
    loss_names = ["loss"]
    metric_names = ["bleu"]
    default_val_metric = "bleu"
def __init__(self , __a , **__a ) -> Any:
"""simple docstring"""
super().__init__(__a , **__a )
UpperCAmelCase__ = hparams.src_lang
UpperCAmelCase__ = hparams.tgt_lang
def UpperCamelCase__ (self , __a , __a ) -> dict:
"""simple docstring"""
return calculate_bleu(__a , __a )
def main(args, model=None) -> SummarizationModule:
    Path(args.output_dir).mkdir(exist_ok=True)
    check_output_dir(args, expected_items=3)

    if model is None:
        if "summarization" in args.task:
            model = SummarizationModule(args)
        else:
            model = TranslationModule(args)
    dataset = Path(args.data_dir).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir).startswith("/tmp")
        or str(args.output_dir).startswith("/var")
    ):
        logger = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger

        project = os.environ.get("WANDB_PROJECT", dataset)
        logger = WandbLogger(name=model.output_dir.name, project=project)
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger

        logger = WandbLogger(name=model.output_dir.name, project=f"hf_{dataset}")

    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric, args.early_stopping_patience)
    else:
        es_callback = False

    lower_is_better = args.val_metric == "loss"
    trainer = generic_train(
        model,
        args,
        logging_callback=SeqaSeqLoggingCallback(),
        checkpoint_callback=get_checkpoint_callback(
            args.output_dir, model.val_metric, args.save_top_k, lower_is_better
        ),
        early_stopping_callback=es_callback,
        logger=logger,
    )
    pickle_save(model.hparams, model.output_dir / "hparams.pkl")
    if not args.do_predict:
        return model

    model.hparams.test_checkpoint = ""
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "*.ckpt"), recursive=True))
    if checkpoints:
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]
    trainer.logger.log_hyperparams(model.hparams)

    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser = pl.Trainer.add_argparse_args(parser)
    parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
main(args)
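# A representative invocation of this script (all paths are placeholders; the
# flags correspond to the arguments registered in add_model_specific_args above
# and the generic lightning/base arguments):
#
#   python finetune.py \
#       --data_dir ./cnn_dm \
#       --output_dir ./seq2seq_out \
#       --model_name_or_path facebook/bart-large \
#       --task summarization \
#       --max_source_length 1024 --max_target_length 56 \
#       --do_train --do_predict \
#       --gpus 1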
from __future__ import annotations
class IIRFilter:
    """N-order IIR filter."""

    def __init__(self, order: int):
        self.order = order

        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order

        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]):
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]

        if len(a_coeffs) != self.order + 1:
            msg = (
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs)}"
            )
            raise ValueError(msg)

        if len(b_coeffs) != self.order + 1:
            msg = (
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs)}"
            )
            raise ValueError(msg)

        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample: float) -> float:
        result = 0.0

        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )

        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]

        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]

        self.input_history[0] = sample
        self.output_history[0] = result

        return result
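# A minimal usage sketch for the filter above (coefficients chosen purely for
# illustration; this helper is not part of the original module and is never
# called here):
def _demo_iir_filter():
    filt = IIRFilter(2)
    # Identity coefficients: y[n] = x[n], so the filter passes samples through.
    filt.set_coefficients([1.0, 0.0, 0.0], [1.0, 0.0, 0.0])
    assert filt.process(0.5) == 0.5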
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
MODEL_CLASSES = {
'''distilbert''': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
'''roberta''': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
'''bert''': (BertConfig, BertForMaskedLM, BertTokenizer),
'''gpt2''': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def sanity_checks(args):
'''simple docstring'''
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False


def freeze_token_type_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main():
'''simple docstring'''
    parser = argparse.ArgumentParser(description="Training")
    parser.add_argument("--force", action="store_true", help="Overwrite dump_path if it already exists.")
    parser.add_argument(
        "--dump_path", type=str, required=True, help="The output directory (log, checkpoints, parameters, etc.)"
    )
    parser.add_argument(
        "--data_file",
        type=str,
        required=True,
        help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence.",
    )
    parser.add_argument(
        "--student_type",
        type=str,
        choices=["distilbert", "roberta", "gpt2"],
        required=True,
        help="The student type (DistilBERT, RoBERTa).",
    )
    parser.add_argument("--student_config", type=str, required=True, help="Path to the student configuration.")
    parser.add_argument(
        "--student_pretrained_weights", default=None, type=str, help="Load student initialization checkpoint."
    )
    parser.add_argument(
        "--teacher_type", choices=["bert", "roberta", "gpt2"], required=True, help="Teacher type (BERT, RoBERTa)."
    )
    parser.add_argument("--teacher_name", type=str, required=True, help="The teacher model.")
    parser.add_argument("--temperature", default=2.0, type=float, help="Temperature for the softmax temperature.")
    parser.add_argument(
        "--alpha_ce", default=0.5, type=float, help="Linear weight for the distillation loss. Must be >=0."
    )
    parser.add_argument(
        "--alpha_mlm",
        default=0.0,
        type=float,
        help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.",
    )
    parser.add_argument("--alpha_clm", default=0.5, type=float, help="Linear weight for the CLM loss. Must be >=0.")
    parser.add_argument("--alpha_mse", default=0.0, type=float, help="Linear weight of the MSE loss. Must be >=0.")
    parser.add_argument(
        "--alpha_cos", default=0.0, type=float, help="Linear weight of the cosine embedding loss. Must be >=0."
    )
    parser.add_argument(
        "--mlm", action="store_true", help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM."
    )
    parser.add_argument(
        "--mlm_mask_prop",
        default=0.15,
        type=float,
        help="Proportion of tokens for which we need to make a prediction.",
    )
    parser.add_argument("--word_mask", default=0.8, type=float, help="Proportion of tokens to mask out.")
    parser.add_argument("--word_keep", default=0.1, type=float, help="Proportion of tokens to keep.")
    parser.add_argument("--word_rand", default=0.1, type=float, help="Proportion of tokens to randomly replace.")
    parser.add_argument(
        "--mlm_smoothing",
        default=0.7,
        type=float,
        help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).",
    )
    parser.add_argument("--token_counts", type=str, help="The token counts in the data_file for MLM.")
    parser.add_argument(
        "--restrict_ce_to_mask",
        action="store_true",
        help="If true, compute the distillation loss only the [MLM] prediction distribution.",
    )
    parser.add_argument(
        "--freeze_pos_embs",
        action="store_true",
        help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.",
    )
    parser.add_argument(
        "--freeze_token_type_embds",
        action="store_true",
        help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.",
    )
    parser.add_argument("--n_epoch", type=int, default=3, help="Number of pass on the whole dataset.")
    parser.add_argument("--batch_size", type=int, default=5, help="Batch size (for each process).")
    parser.add_argument(
        "--group_by_size",
        action="store_false",
        help="If true, group sequences that have similar length into the same batch. Default is true.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=50,
        help="Gradient accumulation for larger training batches.",
    )
    parser.add_argument("--warmup_prop", default=0.05, type=float, help="Linear warmup proportion.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--learning_rate", default=5e-4, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=5.0, type=float, help="Max gradient norm.")
    parser.add_argument("--initializer_range", default=0.02, type=float, help="Random initialization range.")
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O1",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_gpu", type=int, default=1, help="Number of GPUs in the node.")
    parser.add_argument("--local_rank", type=int, default=-1, help="Distributed training - Local rank")
    parser.add_argument("--seed", type=int, default=56, help="Random seed")
    parser.add_argument("--log_interval", type=int, default=500, help="Tensorboard logging interval.")
    parser.add_argument("--checkpoint_interval", type=int, default=4000, help="Checkpoint interval.")
    args = parser.parse_args()
    sanity_checks(args)

    # ARGS #
    init_gpu_params(args)
    set_seed(args)
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
f"Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite"
""" itUse `--force` if you want to overwrite it""" )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(f"Experiment will be dumped and logged in {args.dump_path}" )
# SAVE PARAMS #
logger.info(f"Param: {args}" )
        with open(os.path.join(args.dump_path, "parameters.json"), "w") as f:
            json.dump(vars(args), f, indent=4)
        git_log(args.dump_path)

    student_config_class, student_model_class, student_tokenizer_class = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f"Special tokens {special_tok_ids}")
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(f"Loading data from {args.data_file}" )
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    if args.mlm:
        logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)")
        with open(args.token_counts, "rb") as fp:
            counts = pickle.load(fp)

        token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs)
    else:
        token_probs = None

    train_lm_seq_dataset = LmSeqsDataset(params=args, data=data)
logger.info("""Data loader created.""" )
# STUDENT #
logger.info(f"Loading student config from {args.student_config}" )
UpperCamelCase__ = student_config_class.from_pretrained(args.student_config )
UpperCamelCase__ = True
if args.student_pretrained_weights is not None:
logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}" )
UpperCamelCase__ = student_model_class.from_pretrained(args.student_pretrained_weights , config=__a )
else:
UpperCamelCase__ = student_model_class(__a )
if args.n_gpu > 0:
student.to(f"cuda:{args.local_rank}" )
logger.info("""Student loaded.""" )
# TEACHER #
UpperCamelCase__ = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=__a )
if args.n_gpu > 0:
teacher.to(f"cuda:{args.local_rank}" )
logger.info(f"Teacher loaded from {args.teacher_name}." )
# FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(student, args)
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student, args)
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
    distiller = Distiller(
        params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher
    )
    distiller.train()
logger.info("""Let's go get some drinks.""" )
if __name__ == "__main__":
main()
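# A representative invocation of this distillation script (all paths are
# placeholders; the flags mirror the parser defined in main above, and the
# combination satisfies sanity_checks for the MLM path):
#
#   python train.py \
#       --student_type distilbert \
#       --student_config ./distilbert.json \
#       --teacher_type bert \
#       --teacher_name bert-base-uncased \
#       --mlm --alpha_mlm 0.5 --alpha_ce 0.5 --alpha_clm 0.0 \
#       --token_counts ./token_counts.pkl \
#       --data_file ./binarized.pkl \
#       --dump_path ./distillation_out --force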
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_blip_2": [
        "BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Blip2Config",
        "Blip2QFormerConfig",
        "Blip2VisionConfig",
    ],
    "processing_blip_2": ["Blip2Processor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blip_2"] = [
        "BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Blip2Model",
        "Blip2QFormerModel",
        "Blip2PreTrainedModel",
        "Blip2ForConditionalGeneration",
        "Blip2VisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
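# Behavioral note (illustrative, not part of the original file): with the lazy
# structure above, importing the package is cheap; each heavy submodule loads
# only when an attribute from it is first touched, e.g.
#
#   from transformers.models.blip_2 import Blip2Processor  # imports processing_blip_2 only
#   from transformers.models.blip_2 import Blip2Model      # imports modeling_blip_2 (requires torch)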
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    def __init__(
        self,
        parent,
        out_indices=None,
        stage_names=None,
        out_features=None,
        backbone="resnet50",
        batch_size=3,
        image_size=32,
        num_channels=3,
        use_pretrained_backbone=True,
        is_training=True,
    ):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            stage_names=self.stage_names,
            use_pretrained_backbone=self.use_pretrained_backbone,
            backbone=self.backbone,
        )

    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_map[-1].shape,
            (self.batch_size, model.channels[-1], 14, 14),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TimmBackboneConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = "resnet18"
        transformers_checkpoint = "microsoft/resnet-18"

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)

        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(len(timm_model.stage_names), len(transformers_model.stage_names))
        self.assertEqual(timm_model.channels, transformers_model.channels)
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices, (-1,))
        self.assertEqual(transformers_model.out_indices, [len(timm_model.stage_names) - 1])

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])

        self.assertEqual(timm_model.out_indices, transformers_model.out_indices)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(timm_model.channels, transformers_model.channels)

    @unittest.skip("TimmBackbone doesn't support feed forward chunking")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("TimmBackbone initialization is managed on the timm side")
    def test_initialization(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_model_common_attributes(self):
        pass

    @unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint")
    def test_from_pretrained_no_checkpoint(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_save_load(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tie_model_weights(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tied_model_weights_key_ignore(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_load_save_without_tied_weights(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_model_weights_reload_no_missing_tied_weights(self):
        pass

    @unittest.skip("TimmBackbone doesn't have hidden size info in its configuration.")
    def test_channels(self):
        pass

    @unittest.skip("TimmBackbone doesn't support output_attentions.")
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip("Safetensors is not supported by timm.")
    def test_can_use_safetensors(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)

        output = outputs[0][-1]

        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()

        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()

        output.flatten()[0].backward(retain_graph=True)

        self.assertIsNotNone(hidden_states.grad)

        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)

    def test_backbone_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))

            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)

            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
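
# A minimal usage sketch of the API exercised by the tests above (assumes
# `timm` is installed and weights can be downloaded; left commented out so it
# carries no side effects):
#
#     import torch
#     from transformers import AutoBackbone
#
#     backbone = AutoBackbone.from_pretrained("resnet18", use_timm_backbone=True, out_indices=(1, 2, 3))
#     feature_maps = backbone(torch.randn(1, 3, 224, 224)).feature_maps
#     print([f.shape for f in feature_maps])  # one feature map per requested stage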
"""simple docstring"""
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def make_atom14_masks(protein: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
    """Construct denser atom positions (14 dimensions instead of 37)."""
    restype_atom14_to_atom37_list = []
    restype_atom37_to_atom14_list = []
    restype_atom14_mask_list = []

    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
        restype_atom14_to_atom37_list.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
        restype_atom37_to_atom14_list.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types]
        )

        restype_atom14_mask_list.append([(1.0 if name else 0.0) for name in atom_names])

    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37_list.append([0] * 14)
    restype_atom37_to_atom14_list.append([0] * 37)
    restype_atom14_mask_list.append([0.0] * 14)

    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37_list,
        dtype=torch.int32,
        device=protein["aatype"].device,
    )
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14_list,
        dtype=torch.int32,
        device=protein["aatype"].device,
    )
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask_list,
        dtype=torch.float32,
        device=protein["aatype"].device,
    )
    protein_aatype = protein["aatype"].to(torch.long)

    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]

    protein["atom14_atom_exists"] = residx_atom14_mask
    protein["residx_atom14_to_atom37"] = residx_atom14_to_atom37.long()

    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein["residx_atom37_to_atom14"] = residx_atom37_to_atom14.long()

    # create the corresponding mask
    restype_atom37_mask = torch.zeros([21, 37], dtype=torch.float32, device=protein["aatype"].device)
    for restype, restype_letter in enumerate(rc.restypes):
        restype_name = rc.restype_1to3[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1

    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein["atom37_atom_exists"] = residx_atom37_mask

    return protein


def make_atom14_masks_np(batch: Dict[str, torch.Tensor]) -> Dict[str, np.ndarray]:
    batch = tree_map(lambda n: torch.tensor(n, device=batch["aatype"].device), batch, np.ndarray)
    out = tensor_tree_map(lambda t: np.array(t), make_atom14_masks(batch))
    return out
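
# A toy illustration (fake 2-residue protein, made-up all-zero indices; not
# real residue constants) of how the gather indices built above convert from
# the dense atom14 layout to the padded atom37 layout with torch.gather:
import torch

atom14_coords = torch.arange(2 * 14 * 3, dtype=torch.float32).view(2, 14, 3)
residx_atom37_to_atom14 = torch.zeros(2, 37, dtype=torch.long)  # toy mapping
atom37_coords = torch.gather(
    atom14_coords, 1, residx_atom37_to_atom14[..., None].expand(-1, -1, 3)
)
print(atom37_coords.shape)  # torch.Size([2, 37, 3])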
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/time-series-transformer-tourism-monthly": (
        "https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
    ),
    # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}


class TimeSeriesTransformerConfig(PretrainedConfig):
    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]

        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
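
# Quick check of the `feature_size` arithmetic above, with default
# hyperparameters (illustrative values, not a tuned configuration):
from transformers import TimeSeriesTransformerConfig

config = TimeSeriesTransformerConfig(prediction_length=24, context_length=48)
# input_size * len(lags_sequence) + _number_of_features
# = 1 * 7 + (0 embeddings + 0 dynamic + 0 time + 0 static real + 1 * 2) = 9
print(config.feature_size)  # 9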
"""simple docstring"""
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
logger = logging.getLogger(__name__)


class GLUETransformer(BaseTransformer):
    mode = "sequence-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]

        super().__init__(hparams, num_labels, self.mode)

    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        loss = outputs[0]

        lr_scheduler = self.trainer.lr_schedulers[0]["scheduler"]
        tensorboard_logs = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}

    def prepare_data(self):
        args = self.hparams
        processor = processors[args.task]()
        self.labels = processor.get_labels()

        for mode in ["train", "dev"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = (
                    processor.get_dev_examples(args.data_dir)
                    if mode == "dev"
                    else processor.get_train_examples(args.data_dir)
                )
                features = convert_examples_to_features(
                    examples,
                    self.tokenizer,
                    max_length=args.max_seq_length,
                    label_list=self.labels,
                    output_mode=args.glue_output_mode,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)

    def get_dataloader(self, mode: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        mode = "dev" if mode == "test" else mode

        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        if self.hparams.glue_output_mode == "classification":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
        elif self.hparams.glue_output_mode == "regression":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.float)

        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels),
            batch_size=batch_size,
            shuffle=shuffle,
        )

    def validation_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()

        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def _eval_end(self, outputs) -> tuple:
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean().detach().cpu().item()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)

        if self.hparams.glue_output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif self.hparams.glue_output_mode == "regression":
            preds = np.squeeze(preds)

        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]

        results = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task, preds, out_label_ids)}

        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list

    def validation_epoch_end(self, outputs: list) -> dict:
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs) -> dict:
        ret, predictions, targets = self._eval_end(outputs)
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )

        parser.add_argument(
            "--task",
            default="",
            type=str,
            required=True,
            help="The GLUE task to run",
        )
        parser.add_argument(
            "--gpus",
            default=0,
            type=int,
            help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )

        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )

        return parser


def main():
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = GLUETransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            "./results",
            f"{args.task}_{time.strftime('%Y%m%d_%H%M%S')}",
        )
        os.makedirs(args.output_dir)

    model = GLUETransformer(args)
    trainer = generic_train(model, args)

    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)
if __name__ == "__main__":
main()
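
# Illustrative invocation of this script (the script name, data path and the
# generic flags inherited from lightning_base are assumptions, not verified):
#
#     python run_glue.py --task mrpc --data_dir ./glue_data/MRPC \
#         --model_name_or_path bert-base-cased --output_dir ./results --do_train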
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_informer": [
        "INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_informer"] = [
        "INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InformerForPrediction",
        "InformerModel",
        "InformerPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_informer import (
            INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            InformerForPrediction,
            InformerModel,
            InformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_CITATION = """\
"""

_DESCRIPTION = """
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.

For more information, see https://huggingface.co/docs/transformers/perplexity
"""

_KWARGS_DESCRIPTION = """
Args:
    model_id (str): model used for calculating Perplexity
        NOTE: Perplexity can only be calculated for causal language models.
            This includes models such as gpt2, causal variations of bert,
            causal versions of t5, and more (the full list can be found
            in the AutoModelForCausalLM documentation here:
            https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )

    input_texts (list of str): input text, each separate text snippet
        is one list entry.
    batch_size (int): the batch size to run texts through the model. Defaults to 16.
    add_start_token (bool): whether to add the start token to the texts,
        so the perplexity can include the probability of the first word. Defaults to True.
    device (str): device to run on, defaults to 'cuda' when available
Returns:
    perplexity: dictionary containing the perplexity scores for the texts
        in the input list, as well as the mean perplexity. If one of the input texts is
        longer than the max input length of the model, then it is truncated to the
        max length for the perplexity computation.
Examples:
    Example 1:
        >>> perplexity = datasets.load_metric("perplexity")
        >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]
        >>> results = perplexity.compute(model_id='gpt2',
        ...                              add_start_token=False,
        ...                              input_texts=input_texts) # doctest:+ELLIPSIS
        >>> print(list(results.keys()))
        ['perplexities', 'mean_perplexity']
        >>> print(round(results["mean_perplexity"], 2))
        78.22
        >>> print(round(results["perplexities"][0], 2))
        11.11

    Example 2:
        >>> perplexity = datasets.load_metric("perplexity")
        >>> input_texts = datasets.load_dataset("wikitext",
        ...                                     "wikitext-2-raw-v1",
        ...                                     split="test")["text"][:50] # doctest:+ELLIPSIS
        [...]
        >>> input_texts = [s for s in input_texts if s!='']
        >>> results = perplexity.compute(model_id='gpt2',
        ...                              input_texts=input_texts) # doctest:+ELLIPSIS
        >>> print(list(results.keys()))
        ['perplexities', 'mean_perplexity']
        >>> print(round(results["mean_perplexity"], 2))
        60.35
        >>> print(round(results["perplexities"][0], 2))
        81.12
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Perplexity(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "input_texts": datasets.Value("string"),
                }
            ),
            reference_urls=["https://huggingface.co/docs/transformers/perplexity"],
        )

    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            perplexity_batch = torch.exp2(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
"""simple docstring"""
def rank_of_matrix(matrix: list) -> int:
    """Finds the rank of a matrix by Gaussian elimination (mutates `matrix`)."""
    rows = len(matrix)
    columns = len(matrix[0])
    rank = min(rows, columns)

    for row in range(rank):
        # Check if diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1, rows):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row, columns):
                    matrix[col][i] -= multiplier * matrix[row][i]
        else:
            # Find a non-zero diagonal element to swap rows
            reduce = True
            for i in range(row + 1, rows):
                if matrix[i][row] != 0:
                    matrix[row], matrix[i] = matrix[i], matrix[row]
                    reduce = False
                    break
            if reduce:
                rank -= 1
                for i in range(rows):
                    matrix[i][row] = matrix[i][rank]

            # Reduce the row pointer by one to stay on the same row
            row -= 1

    return rank
if __name__ == "__main__":
import doctest
doctest.testmod()
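
# Quick sanity checks for rank_of_matrix (note that the function does its
# elimination in place, so pass a copy if the input matters):
print(rank_of_matrix([[1.0, 2.0], [2.0, 4.0]]))  # 1 (second row is dependent)
print(rank_of_matrix([[1.0, 0.0], [0.0, 1.0]]))  # 2 (identity has full rank)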
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def count_parameters(state_dict):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict, codebook_state_dict):
    upgrade = {}

    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue

        key = key.replace("heads.cmd.mim_head.cls.predictions", "mmm_image_head")
        key = key.replace("heads.cmd.mlm_head.cls.predictions", "mmm_text_head")
        key = key.replace("heads.cmd.itm_head.cls", "itm_head")
        key = key.replace("heads.cmd.itm_head.pooler", "itm_head.pooler")
        key = key.replace("heads.cmd.clip_head.logit_scale", "flava.logit_scale")
        key = key.replace("heads.fairseq_mlm.cls.predictions", "mlm_head")
        key = key.replace("heads.imagenet.mim_head.cls.predictions", "mim_head")
        key = key.replace("mm_text_projection", "flava.text_to_mm_projection")
        key = key.replace("mm_image_projection", "flava.image_to_mm_projection")
        key = key.replace("image_encoder.module", "flava.image_model")
        key = key.replace("text_encoder.module", "flava.text_model")
        key = key.replace("mm_encoder.module.encoder.cls_token", "flava.multimodal_model.cls_token")
        key = key.replace("mm_encoder.module", "flava.multimodal_model")
        key = key.replace("text_projection", "flava.text_projection")
        key = key.replace("image_projection", "flava.image_projection")

        upgrade[key] = value.float()

    for key, value in codebook_state_dict.items():
        upgrade[f"image_codebook.{key}"] = value

    return upgrade


@torch.no_grad()
def convert_flava_checkpoint(checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path)
    else:
        config = FlavaConfig()

    hf_model = FlavaForPreTraining(config).eval()

    codebook_state_dict = convert_dalle_checkpoint(codebook_path, None, save_checkpoint=False)

    if os.path.exists(checkpoint_path):
        state_dict = torch.load(checkpoint_path, map_location="cpu")
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path, map_location="cpu")

    hf_state_dict = upgrade_state_dict(state_dict, codebook_state_dict)
    hf_model.load_state_dict(hf_state_dict)

    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict) + count_parameters(codebook_state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    hf_model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
    parser.add_argument("--codebook_path", default=None, type=str, help="Path to flava codebook checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")

    args = parser.parse_args()

    convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
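
# The sanity check above compares *sums of weights*, not parameter counts.
# A toy illustration of the same idea on a small module:
import torch
from torch import nn

toy = nn.Linear(2, 2, bias=False)
total = sum(p.float().sum() for p in toy.state_dict().values())
print(torch.allclose(total, toy.weight.float().sum()))  # True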
from maths.prime_check import is_prime
def twin_prime(number: int) -> int:
    """
    Returns the twin prime of `number` (i.e. number + 2) if both are prime,
    and -1 otherwise.
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
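
# Quick examples for twin_prime:
print(twin_prime(5))  # 7, since 5 and 7 are both prime
print(twin_prime(4))  # -1, since 4 is not prime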
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob(text: str) -> None:
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())

    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.

    # print entropy
    print(f"{round(-1 * my_fir_sum):.1f}")

    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for ch0 in my_alphas:
        for ch1 in my_alphas:
            sequence = ch0 + ch1
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)

    # print second entropy
    print(f"{round(-1 * my_sec_sum):.1f}")

    # print the difference between them
    print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}")


def analyze_text(text: str) -> tuple[dict, dict]:
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1

    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings


def main():
    import doctest

    doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
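
# The entropy formula used above, on a tiny hand-made distribution:
# H = -sum(p * log2(p)). A fair coin carries exactly 1 bit of entropy.
import math

probabilities = [0.5, 0.5]
entropy = -sum(p * math.log2(p) for p in probabilities)
print(entropy)  # 1.0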
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def _dump_articles(path: Path, articles: list):
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)


T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"

stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL)  # remove noisy download output from tracebacks


class TestTheRest(TestCasePlus):
    def run_eval_tester(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()
        articles = [" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."]
        _dump_articles(input_file_name, articles)

        score_path = str(Path(self.get_auto_remove_tmp_dir()) / "scores.json")
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"""
            run_eval_search.py
            {model}
            {input_file_name}
            {output_file_name}
            --score_path {score_path}
            --task {task}
            --num_beams 2
            --length_penalty 2.0
            """.split()

        with patch.object(sys, "argv", testargs):
            run_generate()
            assert Path(output_file_name).exists()
            # os.remove(Path(output_file_name))

    def test_run_eval(self):
        self.run_eval_tester(T5_TINY)

    @parameterized.expand([BART_TINY, MBART_TINY])
    @slow
    def test_run_eval_slow(self, model):
        self.run_eval_tester(model)

    @parameterized.expand([T5_TINY, MBART_TINY])
    @slow
    def test_run_eval_search(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()

        text = {
            "en": ["Machine learning is great, isn't it?", "I like to eat bananas", "Tomorrow is another great day!"],
            "de": [
                "Maschinelles Lernen ist großartig, oder?",
                "Ich esse gerne Bananen",
                "Morgen ist wieder ein toller Tag!",
            ],
        }

        tmp_dir = Path(self.get_auto_remove_tmp_dir())
        score_path = str(tmp_dir / "scores.json")
        reference_path = str(tmp_dir / "val.target")
        _dump_articles(input_file_name, text["en"])
        _dump_articles(reference_path, text["de"])
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"""
            run_eval_search.py
            {model}
            {str(input_file_name)}
            {str(output_file_name)}
            --score_path {score_path}
            --reference_path {reference_path}
            --task {task}
            """.split()
        testargs.extend(["--search", "num_beams=1:2 length_penalty=0.9:1.0"])

        with patch.object(sys, "argv", testargs):
            with CaptureStdout() as cs:
                run_search()
            expected_strings = [" num_beams | length_penalty", model, "Best score args"]
            un_expected_strings = ["Info"]
            if "translation" in task:
                expected_strings.append("bleu")
            else:
                expected_strings.extend(ROUGE_KEYS)
            for w in expected_strings:
                assert w in cs.out
            for w in un_expected_strings:
                assert w not in cs.out
            assert Path(output_file_name).exists()
            os.remove(Path(output_file_name))
"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
    OnnxStableDiffusionImg2ImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionImg2ImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1


@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
"""simple docstring"""
import os
def solution():
    """
    Finds the maximum total in a triangle as described by the problem
    statement above, reading the triangle from triangle.txt next to this file.
    """
    script_dir = os.path.dirname(os.path.realpath(__file__))
    triangle_filepath = os.path.join(script_dir, "triangle.txt")

    with open(triangle_filepath) as f:
        triangle = f.readlines()

    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(" "):
            numbers_from_line.append(int(number))
        a.append(numbers_from_line)

    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1, number2)
    return max(a[-1])
if __name__ == "__main__":
print(solution())
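
# The same bottom-up recurrence on a tiny hard-coded triangle, so the update
# `a[i][j] += max(upper, upper_left)` is easy to trace by hand:
a = [[3], [7, 4], [2, 4, 6]]
for i in range(1, len(a)):
    for j in range(len(a[i])):
        upper = a[i - 1][j] if j != len(a[i - 1]) else 0
        upper_left = a[i - 1][j - 1] if j > 0 else 0
        a[i][j] += max(upper, upper_left)
print(max(a[-1]))  # 14, via the path 3 -> 7 -> 4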
"""simple docstring"""
from torch import nn
def get_activation(act_fn: str) -> nn.Module:
    """Returns the activation module matching the given name."""
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f"Unsupported activation function: {act_fn}")
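
# Usage of the helper above:
from torch import nn

act = get_activation("gelu")
assert isinstance(act, nn.GELU)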
"""simple docstring"""
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
logger = datasets.utils.logging.get_logger(__name__)


class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    """Builder Config for AudioFolder."""

    drop_labels: bool = None
    drop_metadata: bool = None


class AudioFolder(folder_based_builder.FolderBasedBuilder):
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = "audio"
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")


AUDIO_EXTENSIONS = [
".aiff",
".au",
".avr",
".caf",
".flac",
".htk",
".svx",
".mat4",
".mat5",
".mpc2k",
".ogg",
".paf",
".pvf",
".raw",
".rf64",
".sd2",
".sds",
".ircam",
".voc",
".w64",
".wav",
".nist",
".wavex",
".wve",
".xi",
".mp3",
".opus",
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class DecisionTransformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        act_dim=6,
        state_dim=17,
        hidden_size=23,
        max_length=11,
        is_training=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.act_dim = act_dim
        self.state_dim = state_dim
        self.hidden_size = hidden_size
        self.max_length = max_length
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        states = floats_tensor((self.batch_size, self.seq_length, self.state_dim))
        actions = floats_tensor((self.batch_size, self.seq_length, self.act_dim))
        rewards = floats_tensor((self.batch_size, self.seq_length, 1))
        returns_to_go = floats_tensor((self.batch_size, self.seq_length, 1))
        timesteps = ids_tensor((self.batch_size, self.seq_length), vocab_size=1000)
        attention_mask = random_attention_mask((self.batch_size, self.seq_length))

        config = self.get_config()

        return (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        )

    def get_config(self):
        return DecisionTransformerConfig(
            batch_size=self.batch_size,
            seq_length=self.seq_length,
            act_dim=self.act_dim,
            state_dim=self.state_dim,
            hidden_size=self.hidden_size,
            max_length=self.max_length,
        )

    def create_and_check_model(
        self,
        config,
        states,
        actions,
        rewards,
        returns_to_go,
        timesteps,
        attention_mask,
    ):
        model = DecisionTransformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(states, actions, rewards, returns_to_go, timesteps, attention_mask)

        self.parent.assertEqual(result.state_preds.shape, states.shape)
        self.parent.assertEqual(result.action_preds.shape, actions.shape)
        self.parent.assertEqual(result.return_preds.shape, returns_to_go.shape)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.seq_length * 3, self.hidden_size)
        )  # seq length *3 as there are 3 modelities: states, returns and actions

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
            "states": states,
            "actions": actions,
            "rewards": rewards,
            "returns_to_go": returns_to_go,
            "timesteps": timesteps,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict


@require_torch
class DecisionTransformerModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DecisionTransformerModel,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}

    # Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
    test_generate_without_input_ids = False

    # Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_attention_outputs = False
    test_hidden_states_output = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    test_gradient_checkpointing = False
    test_torchscript = False

    def setUp(self):
        self.model_tester = DecisionTransformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DecisionTransformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DecisionTransformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "states",
                "actions",
                "rewards",
                "returns_to_go",
                "timesteps",
                "attention_mask",
            ]

            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
@require_torch
class DecisionTransformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_autoregressive_prediction(self):
        NUM_STEPS = 2  # number of steps of autoregressive prediction we will perform
        TARGET_RETURN = 10  # defined by the RL environment, may be normalized
        model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert")
        model = model.to(torch_device)
        config = model.config
        torch.manual_seed(0)
        state = torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32)  # env.reset()

        expected_outputs = torch.tensor(
            [[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]], device=torch_device
        )

        returns_to_go = torch.tensor(TARGET_RETURN, device=torch_device, dtype=torch.float32).reshape(1, 1, 1)
        states = state
        actions = torch.zeros(1, 0, config.act_dim, device=torch_device, dtype=torch.float32)
        rewards = torch.zeros(1, 0, device=torch_device, dtype=torch.float32)
        timesteps = torch.tensor(0, device=torch_device, dtype=torch.long).reshape(1, 1)

        for step in range(NUM_STEPS):
            actions = torch.cat([actions, torch.zeros(1, 1, config.act_dim, device=torch_device)], dim=1)
            rewards = torch.cat([rewards, torch.zeros(1, 1, device=torch_device)], dim=1)
            attention_mask = torch.ones(1, states.shape[1]).to(dtype=torch.long, device=states.device)

            with torch.no_grad():
                state_pred, action_pred, return_pred = model(
                    states=states,
                    actions=actions,
                    rewards=rewards,
                    returns_to_go=returns_to_go,
                    timesteps=timesteps,
                    attention_mask=attention_mask,
                    return_dict=False,
                )

            self.assertEqual(action_pred.shape, actions.shape)
            self.assertTrue(torch.allclose(action_pred[0, -1], expected_outputs[step], atol=1e-4))

            state, reward, _, _ = (  # env.step(action)
                torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32),
                1.0,
                False,
                {},
            )

            actions[-1] = action_pred[0, -1]
            states = torch.cat([states, state], dim=1)
            pred_return = returns_to_go[0, -1] - reward
            returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1, 1, 1)], dim=1)
            timesteps = torch.cat(
                [timesteps, torch.ones((1, 1), device=torch_device, dtype=torch.long) * (step + 1)], dim=1
            )
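# ---------------------------------------------------------------------------
# Illustrative only: a minimal sketch of the forward interface the tests above
# exercise. The checkpoint id comes from the integration test; shapes are
# (batch, sequence, feature). Kept as comments so the test module is unchanged.
#
#     model = DecisionTransformerModel.from_pretrained(
#         "edbeeching/decision-transformer-gym-hopper-expert"
#     ).eval()
#     cfg = model.config
#     out = model(
#         states=torch.randn(1, 3, cfg.state_dim),
#         actions=torch.zeros(1, 3, cfg.act_dim),
#         rewards=torch.zeros(1, 3, 1),
#         returns_to_go=torch.full((1, 3, 1), 10.0),
#         timesteps=torch.arange(3).reshape(1, 3),
#         attention_mask=torch.ones(1, 3, dtype=torch.long),
#     )
#     out.action_preds.shape  # -> (1, 3, cfg.act_dim)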
| 382
|
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
PATH_TO_DIFFUSERS = "src/diffusers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available\(\)")
# Matches from xxx import bla
_re_single_line_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
DUMMY_CONSTANT = """
{0} = None
"""

DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, {1})

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, {1})
"""

DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
"""
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    backends = _re_backend.findall(line)
    if len(backends) == 0:
        return None

    return "_and_".join(backends)
def read_init():
    """Read the init and extract backend-specific objects."""
    with open(os.path.join(PATH_TO_DIFFUSERS, "__init__.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index])
        if backend is not None:
            while not lines[line_index].startswith("else:"):
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines) and len(lines[line_index]) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 8):
                    objects.append(line[8:-2])
                line_index += 1

            if len(objects) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1

    return backend_specific_objects
def create_dummy_object(name, backend_name):
    """Create the code for the dummy object corresponding to `name`."""
    if name.isupper():
        return DUMMY_CONSTANT.format(name)
    elif name.islower():
        return DUMMY_FUNCTION.format(name, backend_name)
    else:
        return DUMMY_CLASS.format(name, backend_name)
def create_dummy_files(backend_specific_objects=None):
    """Create the content of the dummy files."""
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}

    for backend, objects in backend_specific_objects.items():
        backend_name = "[" + ", ".join(f'"{b}"' for b in backend.split("_and_")) + "]"
        dummy_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o, backend_name) for o in objects])
        dummy_files[backend] = dummy_file

    return dummy_files
def check_dummies(overwrite=False):
    """Check if the dummy files are up to date and maybe `overwrite` with the right content."""
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {"torch": "pt"}

    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS, "utils")
    dummy_file_paths = {
        backend: os.path.join(path, f"dummy_{short_names.get(backend, backend)}_objects.py")
        for backend in dummy_files.keys()
    }

    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path):
            with open(file_path, "r", encoding="utf-8", newline="\n") as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ""

    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f"Updating diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main "
                    "__init__ has new objects."
                )
                with open(dummy_file_paths[backend], "w", encoding="utf-8", newline="\n") as f:
                    f.write(dummy_files[backend])
            else:
                raise ValueError(
                    "The main __init__ has objects that are not present in "
                    f"diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` "
                    "to fix this."
                )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_dummies(args.fix_and_overwrite)
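# For reference, this is roughly what create_dummy_object("UNet2DModel", '["torch"]')
# renders via the DUMMY_CLASS template (class name chosen for illustration):
#
#     class UNet2DModel(metaclass=DummyObject):
#         _backends = ["torch"]
#
#         def __init__(self, *args, **kwargs):
#             requires_backends(self, ["torch"])
#
#         @classmethod
#         def from_config(cls, *args, **kwargs):
#             requires_backends(cls, ["torch"])
#
#         @classmethod
#         def from_pretrained(cls, *args, **kwargs):
#             requires_backends(cls, ["torch"])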
| 381
| 0
|
'''simple docstring'''
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k):
    return getitem, k


def _set(k, v):
    return setitem, k, v


def _del(k):
    return delitem, k


def _run_operation(obj, fun, *args):
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e
_add_items = (
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
)

_overwrite_items = [
    _set("key_a", "val_a"),
    _set("key_a", "val_b"),
]

_delete_items = [
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
    _del("key_a"),
    _del("key_b"),
    _set("key_a", "val_a"),
    _del("key_a"),
]

_access_absent_items = [
    _get("key_a"),
    _del("key_a"),
    _set("key_a", "val_a"),
    _del("key_a"),
    _del("key_a"),
    _get("key_a"),
]

_add_with_resize_up = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
]

_add_with_resize_down = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set("key_a", "val_b"),
]
@pytest.mark.parametrize(
'operations' , (
pytest.param(_add_items , id='add items' ),
pytest.param(_overwrite_items , id='overwrite items' ),
pytest.param(_delete_items , id='delete items' ),
pytest.param(_access_absent_items , id='access absent items' ),
pytest.param(_add_with_resize_up , id='add with resize up' ),
pytest.param(_add_with_resize_down , id='add with resize down' ),
) , )
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my_exc) == str(py_exc)
        assert set(py) == set(my)
        assert len(py) == len(my)
        assert set(my.items()) == set(py.items())
def test_no_new_methods_was_added_to_api():
    def is_public(name: str) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}

    assert dict_public_names > hash_public_names
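# Usage implied by the operations above: HashMap mirrors the built-in dict API.
#
#     hm = HashMap(initial_block_size=4)
#     hm["key_a"] = "val_a"   # setitem
#     hm["key_a"]             # getitem -> "val_a"
#     del hm["key_a"]         # delitem
#     len(hm)                 # -> 0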
| 716
|
'''simple docstring'''
import re
import string
import numpy as np
import datasets
_lowercase = """
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
"""
_lowercase = """
Args:
predictions: List of predicted texts.
references: List of reference texts.
regexes_to_ignore: List, defaults to None. Regex expressions of characters to
ignore when calculating the exact matches. Note: these regexes are removed
from the input data before the changes based on the options below (e.g. ignore_case,
ignore_punctuation, ignore_numbers) are applied.
ignore_case: Boolean, defaults to False. If true, turns everything
to lowercase so that capitalization differences are ignored.
ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
    ignore_numbers: Boolean, defaults to False. If true, removes all digits before
        comparing predictions and references.
Returns:
exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results[\"exact_match\"], 1))
25.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results[\"exact_match\"], 1))
50.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results[\"exact_match\"], 1))
75.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
>>> print(round(results[\"exact_match\"], 1))
100.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]
>>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results[\"exact_match\"], 1))
33.3
"""
_lowercase = """
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class ExactMatch(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )

    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references

        return {"exact_match": np.mean(score_list) * 100}
| 427
| 0
|
'''simple docstring'''
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def jax_cosine_distance(emb_1, emb_2, eps=1e-12):
    norm_emb_1 = jnp.divide(emb_1.T, jnp.clip(jnp.linalg.norm(emb_1, axis=1), a_min=eps)).T
    norm_emb_2 = jnp.divide(emb_2.T, jnp.clip(jnp.linalg.norm(emb_2, axis=1), a_min=eps)).T
    return jnp.matmul(norm_emb_1, norm_emb_2.T)
class FlaxStableDiffusionSafetyCheckerModule(nn.Module):
    config: CLIPConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.vision_model = FlaxCLIPVisionModule(self.config.vision_config)
        self.visual_projection = nn.Dense(self.config.projection_dim, use_bias=False, dtype=self.dtype)

        self.concept_embeds = self.param("concept_embeds", jax.nn.initializers.ones, (17, self.config.projection_dim))
        self.special_care_embeds = self.param(
            "special_care_embeds", jax.nn.initializers.ones, (3, self.config.projection_dim)
        )

        self.concept_embeds_weights = self.param("concept_embeds_weights", jax.nn.initializers.ones, (17,))
        self.special_care_embeds_weights = self.param("special_care_embeds_weights", jax.nn.initializers.ones, (3,))

    def __call__(self, clip_input):
        pooled_output = self.vision_model(clip_input)[1]
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = jax_cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = jax_cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        special_scores = jnp.round(special_scores, 3)
        is_special_care = jnp.any(special_scores > 0, axis=1, keepdims=True)
        # Use a lower threshold if an image has any special care concept
        special_adjustment = is_special_care * 0.01

        concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        concept_scores = jnp.round(concept_scores, 3)
        has_nsfw_concepts = jnp.any(concept_scores > 0, axis=1)

        return has_nsfw_concepts
class FlaxStableDiffusionSafetyChecker(FlaxPreTrainedModel):
    config_class = CLIPConfig
    main_input_name = "clip_input"
    module_class = FlaxStableDiffusionSafetyCheckerModule

    def __init__(
        self,
        config: CLIPConfig,
        input_shape: Optional[Tuple] = None,
        seed: int = 0,
        dtype: jnp.dtype = jnp.float32,
        _do_init: bool = True,
        **kwargs,
    ):
        if input_shape is None:
            input_shape = (1, 224, 224, 3)
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)

    def init_weights(self, rng: jax.random.KeyArray, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
        # init input tensor
        clip_input = jax.random.normal(rng, input_shape)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        random_params = self.module.init(rngs, clip_input)["params"]

        return random_params

    def __call__(
        self,
        clip_input,
        params: dict = None,
    ):
        clip_input = jnp.transpose(clip_input, (0, 2, 3, 1))

        return self.module.apply(
            {"params": params or self.params},
            jnp.array(clip_input, dtype=jnp.float32),
            rngs={},
        )
| 565
|
def binary_or(a: int, b: int) -> str:
    """Return the bitwise OR of two non-negative integers as a binary string."""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]

    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
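    # Example: the shorter operand is zero-padded before OR-ing digit by digit,
    # so 25 = 0b011001 and 32 = 0b100000 combine to 0b111001 (= 57).
    print(binary_or(25, 32))  # 0b111001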
| 565
| 1
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "BridgeTower/bridgetower-base": "https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json",
    "BridgeTower/bridgetower-base-itm-mlm": (
        "https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json"
    ),
}
class BridgeTowerVisionConfig(PretrainedConfig):
    model_type = "bridgetower_vision_model"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_channels=3,
        patch_size=16,
        image_size=288,
        initializer_factor=1,
        layer_norm_eps=1e-05,
        stop_gradient=False,
        share_layernorm=True,
        remove_last_layer=False,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.stop_gradient = stop_gradient
        self.share_layernorm = share_layernorm
        self.remove_last_layer = remove_last_layer

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if config_dict.get("model_type") == "bridgetower":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class BridgeTowerTextConfig(PretrainedConfig):
    model_type = "bridgetower_text_model"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        initializer_factor=1,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_factor = initializer_factor
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if config_dict.get("model_type") == "bridgetower":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class BridgeTowerConfig(PretrainedConfig):
    model_type = "bridgetower"

    def __init__(
        self,
        share_cross_modal_transformer_layers=True,
        hidden_act="gelu",
        hidden_size=768,
        initializer_factor=1,
        layer_norm_eps=1e-05,
        share_link_tower_layers=False,
        link_tower_type="add",
        num_attention_heads=12,
        num_hidden_layers=6,
        tie_word_embeddings=False,
        init_layernorm_from_vision_encoder=False,
        text_config=None,
        vision_config=None,
        **kwargs,
    ):
        # TODO: remove this once the Hub files are updated.
        _ = kwargs.pop("text_config_dict", None)
        _ = kwargs.pop("vision_config_dict", None)

        super().__init__(**kwargs)
        self.share_cross_modal_transformer_layers = share_cross_modal_transformer_layers
        self.hidden_act = hidden_act
        self.hidden_size = hidden_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.share_link_tower_layers = share_link_tower_layers
        self.link_tower_type = link_tower_type
        self.num_attention_heads = num_attention_heads
        self.num_hidden_layers = num_hidden_layers
        self.tie_word_embeddings = tie_word_embeddings
        self.init_layernorm_from_vision_encoder = init_layernorm_from_vision_encoder

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.")

        self.text_config = BridgeTowerTextConfig(**text_config)
        self.vision_config = BridgeTowerVisionConfig(**vision_config)

    @classmethod
    def from_text_vision_configs(
        cls, text_config: BridgeTowerTextConfig, vision_config: BridgeTowerVisionConfig, **kwargs
    ):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
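# Usage sketch (values are the defaults restored above; illustrative only):
#
#     config = BridgeTowerConfig()            # builds default text + vision sub-configs
#     config.text_config.vocab_size           # -> 50265
#     config.vision_config.image_size         # -> 288
#     combined = BridgeTowerConfig.from_text_vision_configs(
#         BridgeTowerTextConfig(), BridgeTowerVisionConfig()
#     )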
| 436
|
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
    from transformers import (
        FlaxBertModel,
        FlaxCLIPVisionModel,
        FlaxVisionTextDualEncoderModel,
        FlaxViTModel,
        VisionTextDualEncoderConfig,
        VisionTextDualEncoderProcessor,
    )
    from transformers.modeling_flax_pytorch_utils import (
        convert_pytorch_state_dict_to_flax,
        load_flax_weights_in_pytorch_model,
    )

if is_torch_available():
    import torch

    from transformers import VisionTextDualEncoderModel

if is_vision_available():
    from PIL import Image
def to_2tuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_flax
class VisionTextDualEncoderMixin:
    def get_vision_text_model(self, config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass
    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")

    def check_model_from_pretrained_configs(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        model = FlaxVisionTextDualEncoderModel(config)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))

    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0]

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname)

            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0]
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-3)

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def check_pt_flax_equivalence(self, pt_model, fx_model, inputs_dict):
        pt_model.to(torch_device)
        pt_model.eval()

        # prepare inputs
        flax_inputs = inputs_dict
        pt_inputs = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()}

        with torch.no_grad():
            pt_outputs = pt_model(**pt_inputs).to_tuple()

        fx_outputs = fx_model(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
        for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2)

        # PT -> Flax
        with tempfile.TemporaryDirectory() as tmpdirname:
            pt_model.save_pretrained(tmpdirname)
            fx_model_loaded = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname, from_pt=True)

        fx_outputs_loaded = fx_model_loaded(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
        for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output_loaded, pt_output.numpy(), 4e-2)

        # Flax -> PT
        with tempfile.TemporaryDirectory() as tmpdirname:
            fx_model.save_pretrained(tmpdirname)
            pt_model_loaded = VisionTextDualEncoderModel.from_pretrained(tmpdirname, from_flax=True)

        pt_model_loaded.to(torch_device)
        pt_model_loaded.eval()

        with torch.no_grad():
            pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

        self.assertEqual(len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch")
        for fx_output, pt_output_loaded in zip(fx_outputs[:4], pt_outputs_loaded[:4]):
            self.assert_almost_equals(fx_output, pt_output_loaded.numpy(), 4e-2)

    def check_equivalence_pt_to_flax(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)

        fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
        fx_model.params = fx_state

        self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict)

    def check_equivalence_flax_to_pt(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)

        pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)

        self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict)
    def test_model_from_pretrained_configs(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)

    def test_save_load(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict)

    def test_vision_text_output_attention(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict)

    @is_pt_flax_cross_test
    def test_pt_flax_equivalence(self):
        config_inputs_dict = self.prepare_config_and_inputs()
        vision_config = config_inputs_dict.pop("vision_config")
        text_config = config_inputs_dict.pop("text_config")

        inputs_dict = config_inputs_dict

        self.check_equivalence_pt_to_flax(vision_config, text_config, inputs_dict)
        self.check_equivalence_flax_to_pt(vision_config, text_config, inputs_dict)

    @slow
    def test_real_model_save_load_from_pretrained(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()

        outputs = model_2(**inputs)
        out_2 = outputs[0]

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = FlaxVisionTextDualEncoderModel.from_pretrained(tmp_dirname)

            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0]
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)
@require_flax
class FlaxViTBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxViTModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = FlaxViTModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_torch
class FlaxCLIPVisionBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-clip",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxCLIPVisionModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = FlaxCLIPVisionModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_flax
@require_vision
class FlaxVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian", logit_scale_init_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )

        expected_logits = np.array([[1.2284727, 0.3104122]])

        self.assertTrue(np.allclose(outputs.logits_per_image, expected_logits, atol=1e-3))
| 436
| 1
|
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration(func):
    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        _ = func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__

    return wrapper
def generate_examples(features: dict, num_examples=100, seq_shapes=None):
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                data = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    data = "The small grey turtle was surprisingly fast when challenged."
                else:
                    data = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape).astype(v.dtype)
            example[k] = data

        dummy_data.append((i, example))

    return dummy_data
def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)

        num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))

    return dataset
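if __name__ == "__main__":
    # Illustrative benchmark setup (path and shapes chosen for the example):
    # one string column plus a fixed-shape float sequence column.
    _features = datasets.Features(
        {"text": datasets.Value("string"), "vec": datasets.Sequence(datasets.Value("float32"))}
    )
    _dataset = generate_example_dataset(
        "/tmp/dummy_bench.arrow", _features, num_examples=10, seq_shapes={"vec": (8,)}
    )
    print(_dataset)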
| 276
|
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
c = 299_792_458
# Symbols
ct, x, y, z = symbols("ct x y z")
def beta(velocity: float) -> float:
    if velocity > c:
        raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!")
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError("Speed must be greater than or equal to 1!")

    return velocity / c


def gamma(velocity: float) -> float:
    return 1 / sqrt(1 - beta(velocity) ** 2)


def transformation_matrix(velocity: float) -> np.ndarray:
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ]
    )


def transform(velocity: float, event: np.ndarray = None) -> np.ndarray:
    # Ensure event is not empty
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)

    return transformation_matrix(velocity) @ event
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
__magic_name__ = transform(29_979_245)
print('''Example of four vector: ''')
print(F'''ct\' = {four_vector[0]}''')
print(F'''x\' = {four_vector[1]}''')
print(F'''y\' = {four_vector[2]}''')
print(F'''z\' = {four_vector[3]}''')
# Substitute symbols with numerical values
__magic_name__ = {ct: c, x: 1, y: 1, z: 1}
__magic_name__ = [four_vector[i].subs(sub_dict) for i in range(4)]
print(F'''\n{numerical_vector}''')
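    # For reference, the boost implemented by transformation_matrix (x-direction,
    # with beta = v/c and gamma = 1/sqrt(1 - beta^2)), in LaTeX form:
    #
    #   \begin{pmatrix} ct' \\ x' \\ y' \\ z' \end{pmatrix} =
    #   \begin{pmatrix}
    #     \gamma       & -\gamma\beta & 0 & 0 \\
    #     -\gamma\beta & \gamma       & 0 & 0 \\
    #     0            & 0            & 1 & 0 \\
    #     0            & 0            & 0 & 1
    #   \end{pmatrix}
    #   \begin{pmatrix} ct \\ x \\ y \\ z \end{pmatrix}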
| 276
| 1
|
"""simple docstring"""
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
    import torch

if is_vision_available():
    import PIL
logger = logging.get_logger(__name__)
def get_resize_output_image_size(
    input_image: np.ndarray, output_size: Union[int, Iterable[int]], keep_aspect_ratio: bool, multiple: int
) -> Tuple[int, int]:
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple

        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple

        if x < min_val:
            x = math.ceil(val / multiple) * multiple

        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size

    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)
class DPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = get_resize_output_image_size(
            image,
            output_size=(size["height"], size["width"]),
            keep_aspect_ratio=keep_aspect_ratio,
            multiple=ensure_multiple_of,
        )
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: int = None,
        keep_aspect_ratio: bool = None,
        ensure_multiple_of: int = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
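# Usage sketch (the checkpoint id is illustrative): preprocess one image for DPT.
#
#     from PIL import Image
#     from transformers import DPTImageProcessor
#
#     processor = DPTImageProcessor.from_pretrained("Intel/dpt-large")
#     image = Image.open("example.jpg")
#     inputs = processor(images=image, return_tensors="pt")
#     inputs["pixel_values"].shape  # e.g. (1, 3, 384, 384)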
| 707
|
"""simple docstring"""
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray) -> bool:
    """Checks if a matrix is Hermitian."""
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Returns the Rayleigh quotient of a Hermitian matrix `a` and vector `v`."""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
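    # The quantity computed by rayleigh_quotient, for Hermitian M and v != 0:
    #
    #   R(M, v) = (v* M v) / (v* v)
    #
    # For Hermitian M this value is always real and lies between the smallest
    # and largest eigenvalues of M, reaching an eigenvalue exactly when v is a
    # corresponding eigenvector.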
| 342
| 0
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
    from PIL import Image

    from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class CLIPSegProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepares a list of PIL images from random uint8 arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPSegProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPSegProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPSegProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
SCREAMING_SNAKE_CASE__: List[str]= self.get_image_processor(do_normalize=lowerCAmelCase , padding_value=1.0 )
SCREAMING_SNAKE_CASE__: Optional[int]= CLIPSegProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=lowerCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowerCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCAmelCase )
def UpperCamelCase_ ( self ) -> Dict:
SCREAMING_SNAKE_CASE__: Any= self.get_image_processor()
SCREAMING_SNAKE_CASE__: Any= self.get_tokenizer()
SCREAMING_SNAKE_CASE__: Optional[int]= CLIPSegProcessor(tokenizer=lowerCAmelCase , image_processor=lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Union[str, Any]= self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__: Any= image_processor(lowerCAmelCase , return_tensors='''np''' )
SCREAMING_SNAKE_CASE__: str= processor(images=lowerCAmelCase , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCamelCase_ ( self ) -> Any:
SCREAMING_SNAKE_CASE__: Optional[int]= self.get_image_processor()
SCREAMING_SNAKE_CASE__: Optional[int]= self.get_tokenizer()
SCREAMING_SNAKE_CASE__: int= CLIPSegProcessor(tokenizer=lowerCAmelCase , image_processor=lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Any= '''lower newer'''
SCREAMING_SNAKE_CASE__: List[Any]= processor(text=lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Tuple= tokenizer(lowerCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCamelCase_ ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE__: Tuple= self.get_image_processor()
SCREAMING_SNAKE_CASE__: Optional[Any]= self.get_tokenizer()
SCREAMING_SNAKE_CASE__: int= CLIPSegProcessor(tokenizer=lowerCAmelCase , image_processor=lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Optional[Any]= '''lower newer'''
SCREAMING_SNAKE_CASE__: Dict= self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__: Dict= processor(text=lowerCAmelCase , images=lowerCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(lowerCAmelCase ):
processor()
def UpperCamelCase_ ( self ) -> int:
SCREAMING_SNAKE_CASE__: Optional[Any]= self.get_image_processor()
SCREAMING_SNAKE_CASE__: List[Any]= self.get_tokenizer()
SCREAMING_SNAKE_CASE__: Optional[Any]= CLIPSegProcessor(tokenizer=lowerCAmelCase , image_processor=lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Any= self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__: Optional[Any]= self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__: List[str]= processor(images=lowerCAmelCase , visual_prompt=lowerCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''conditional_pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(lowerCAmelCase ):
processor()
def UpperCamelCase_ ( self ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__: List[str]= self.get_image_processor()
SCREAMING_SNAKE_CASE__: Tuple= self.get_tokenizer()
SCREAMING_SNAKE_CASE__: Optional[Any]= CLIPSegProcessor(tokenizer=lowerCAmelCase , image_processor=lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Optional[Any]= [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
SCREAMING_SNAKE_CASE__: Any= processor.batch_decode(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Any= tokenizer.batch_decode(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
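
# Added note (an assumption about repository layout, not stated in this file):
# under the usual transformers test layout, this suite would be run with
#
#     python -m pytest tests/models/clipseg/test_processor_clipseg.py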
| 64
|
"""simple docstring"""
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class A__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@register_to_config
def __init__( self: List[str] , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: float , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: bool = False , ) -> Any:
"""simple docstring"""
super().__init__()
__lowerCAmelCase : str = nn.Embedding(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Dict = nn.Embedding(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : str = False
__lowerCAmelCase : Optional[Any] = nn.Dropout(p=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Dict = TaConfig(
vocab_size=_SCREAMING_SNAKE_CASE , d_model=_SCREAMING_SNAKE_CASE , num_heads=_SCREAMING_SNAKE_CASE , d_kv=_SCREAMING_SNAKE_CASE , d_ff=_SCREAMING_SNAKE_CASE , dropout_rate=_SCREAMING_SNAKE_CASE , feed_forward_proj=_SCREAMING_SNAKE_CASE , is_decoder=_SCREAMING_SNAKE_CASE , is_encoder_decoder=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : str = nn.ModuleList()
for lyr_num in range(_SCREAMING_SNAKE_CASE):
__lowerCAmelCase : int = TaBlock(_SCREAMING_SNAKE_CASE)
self.encoders.append(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Dict = TaLayerNorm(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Any = nn.Dropout(p=_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: Any , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: str) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] = self.token_embedder(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[Any] = encoder_input_tokens.shape[1]
__lowerCAmelCase : List[Any] = torch.arange(_SCREAMING_SNAKE_CASE , device=encoder_input_tokens.device)
x += self.position_encoding(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[Any] = self.dropout_pre(_SCREAMING_SNAKE_CASE)
# inverted the attention mask
__lowerCAmelCase : List[Any] = encoder_input_tokens.size()
__lowerCAmelCase : Any = self.get_extended_attention_mask(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
for lyr in self.encoders:
__lowerCAmelCase : Union[str, Any] = lyr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)[0]
__lowerCAmelCase : int = self.layer_norm(_SCREAMING_SNAKE_CASE)
return self.dropout_post(_SCREAMING_SNAKE_CASE), encoder_inputs_mask
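
# Added instantiation sketch (hypothetical: every dimension below is an
# illustrative assumption, and the class is only importable from inside its
# pipeline package because of the relative imports above).
#
#     encoder = SpectrogramNotesEncoder(
#         max_length=2048, vocab_size=1536, d_model=768, dropout_rate=0.1,
#         num_layers=12, num_heads=12, d_kv=64, d_ff=2048,
#         feed_forward_proj="gated-gelu",
#     )
#     tokens = torch.randint(0, 1536, (1, 2048))
#     mask = torch.ones(1, 2048, dtype=torch.long)
#     hidden, out_mask = encoder(tokens, mask)  # hidden: (1, 2048, 768)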
| 293
| 0
|
import argparse
import copy


def generate_neighbours(path):
    dict_of_neighbours = {}
    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]]
                )
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]]
                )
    return dict_of_neighbours


def generate_first_solution(path, dict_of_neighbours):
    with open(path) as f:
        start_node = f.read(1)
        end_node = start_node

        first_solution = []

        visiting = start_node

        distance_of_first_solution = 0
        while visiting not in first_solution:
            minim = 10000
            for k in dict_of_neighbours[visiting]:
                if int(k[1]) < int(minim) and k[0] not in first_solution:
                    minim = k[1]
                    best_node = k[0]

            first_solution.append(visiting)
            distance_of_first_solution = distance_of_first_solution + int(minim)
            visiting = best_node

        first_solution.append(end_node)

        position = 0
        for k in dict_of_neighbours[first_solution[-2]]:
            if k[0] == start_node:
                break
            position += 1

        distance_of_first_solution = (
            distance_of_first_solution
            + int(dict_of_neighbours[first_solution[-2]][position][1])
            - 10000
        )
    return first_solution, distance_of_first_solution


def find_neighborhood(solution, dict_of_neighbours):
    neighborhood_of_solution = []

    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue

            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n

            distance = 0

            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)

            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)

    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1

    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution


def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution

    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1

        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1

            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]

        if len(tabu_list) >= size:
            tabu_list.pop(0)

        count = count + 1

    return best_solution_ever, best_cost


def main(args=None):
    dict_of_neighbours = generate_neighbours(args.File)
    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours
    )
    best_sol, best_cost = tabu_search(
        first_solution,
        distance_of_first_solution,
        dict_of_neighbours,
        args.Iterations,
        args.Size,
    )
    print(f"Best solution: {best_sol}, with total distance: {best_cost}.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Tabu Search")
    parser.add_argument(
        "-f",
        "--File",
        type=str,
        help="Path to the file containing the data",
        required=True,
    )
    parser.add_argument(
        "-i",
        "--Iterations",
        type=int,
        help="How many iterations the algorithm should perform",
        required=True,
    )
    parser.add_argument(
        "-s", "--Size", type=int, help="Size of the tabu list", required=True
    )

    # Pass the arguments to main method
    main(parser.parse_args())
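
# Added note on the expected input (inferred from generate_neighbours, which
# reads whitespace-separated "node_a node_b distance" triples, and from
# generate_first_solution, which takes the start node from the file's first
# character). A hypothetical input file could look like:
#
#     a b 20
#     a c 18
#     a d 22
#     b c 10
#     b d 11
#     c d 23
#
# and would be run as, for example:
#
#     python tabu_search.py -f input.txt -i 4 -s 3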
| 711
|
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss

from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
    BERT_INPUTS_DOCSTRING,
    BERT_START_DOCSTRING,
    BertEmbeddings,
    BertLayer,
    BertPooler,
    BertPreTrainedModel,
)


def entropy(x):
    """Calculate entropy of a pre-softmax logit Tensor"""
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
    return torch.log(A) - B / A


class DeeBertEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
        self.highway = nn.ModuleList([BertHighway(config) for _ in range(config.num_hidden_layers)])
        self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers)]

    def set_early_exit_entropy(self, x):
        if (type(x) is float) or (type(x) is int):
            for i in range(len(self.early_exit_entropy)):
                self.early_exit_entropy[i] = x
        else:
            self.early_exit_entropy = x

    def init_highway_pooler(self, pooler):
        loaded_model = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name])

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        all_hidden_states = ()
        all_attentions = ()
        all_highway_exits = ()
        for i, layer_module in enumerate(self.layer):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_outputs = layer_module(
                hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask
            )
            hidden_states = layer_outputs[0]

            if self.output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

            current_outputs = (hidden_states,)
            if self.output_hidden_states:
                current_outputs = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                current_outputs = current_outputs + (all_attentions,)

            highway_exit = self.highway[i](current_outputs)
            # logits, pooled_output

            if not self.training:
                highway_logits = highway_exit[0]
                highway_entropy = entropy(highway_logits)
                highway_exit = highway_exit + (highway_entropy,)  # logits, hidden_states(?), entropy
                all_highway_exits = all_highway_exits + (highway_exit,)

                if highway_entropy < self.early_exit_entropy[i]:
                    new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
                    raise HighwayException(new_output, i + 1)
            else:
                all_highway_exits = all_highway_exits + (highway_exit,)

        # Add last layer
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)

        outputs = outputs + (all_highway_exits,)
        return outputs  # last-layer hidden state, (all hidden states), (all attentions), all highway exits


@add_start_docstrings(
    "The Bert Model transformer with early exiting (DeeBERT). ",
    BERT_START_DOCSTRING,
)
class DeeBertModel(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.embeddings = BertEmbeddings(config)
        self.encoder = DeeBertEncoder(config)
        self.pooler = BertPooler(config)

        self.init_weights()

    def init_highway_pooler(self):
        self.encoder.init_highway_pooler(self.pooler)

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if encoder_attention_mask is None:
            encoder_attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_attention_mask.dim() == 3:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
        encoder_extended_attention_mask = encoder_extended_attention_mask.to(
            dtype=next(self.parameters()).dtype
        )  # fp16 compatibility
        encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)

        outputs = (
            sequence_output,
            pooled_output,
        ) + encoder_outputs[
            1:
        ]  # add hidden_states and attentions if they are here
        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions), highway exits


class HighwayException(Exception):
    def __init__(self, message, exit_layer):
        self.message = message
        self.exit_layer = exit_layer  # start from 1!


class BertHighway(nn.Module):
    """A shortcut from an intermediate BertLayer's output to an early classification head."""

    def __init__(self, config):
        super().__init__()
        self.pooler = BertPooler(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, encoder_outputs):
        # Pooler
        pooler_input = encoder_outputs[0]
        pooler_output = self.pooler(pooler_input)
        # "return" pooler_output

        # BertModel
        bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output

        # Dropout and classification
        pooled_output = bmodel_output[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        return logits, pooled_output


@add_start_docstrings(
    """Bert Model (with early exiting - DeeBERT) with a classifier on top,
    also takes care of multi-layer training. """,
    BERT_START_DOCSTRING,
)
class DeeBertForSequenceClassification(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.bert = DeeBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.bert(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )
            # sequence_output, pooled_output, (hidden_states), (attentions), highway exits

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), (highway_exits)
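
# Added usage sketch (hypothetical, based only on the classes defined above):
#
#     from transformers import BertConfig
#
#     config = BertConfig.from_pretrained("bert-base-uncased", num_labels=2)
#     model = DeeBertForSequenceClassification(config)
#     model.bert.encoder.set_early_exit_entropy(0.5)  # exit once a highway head is confident
#     model.bert.init_highway_pooler()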
| 432
| 0
|
from __future__ import annotations

import math


def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """Returns the optimal value for the current player in a minimax game tree."""
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
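
# Added worked example (not in the original file): for the height-2 tree over
# the leaf scores [3, 5, 2, 9], the maximizer obtains
# max(min(3, 5), min(2, 9)) = max(3, 2) = 3.
if __name__ == "__main__":
    example_scores = [3, 5, 2, 9]
    example_height = math.log(len(example_scores), 2)
    assert minimax(0, 0, True, example_scores, example_height) == 3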
| 92
|
import numpy as np
from cv2 import destroyAllWindows, imread, imshow, waitKey


class NearestNeighbour:
    """
    Simplest and fastest version of image resizing.
    Source: https://en.wikipedia.org/wiki/Nearest-neighbor_interpolation
    """

    def __init__(self, img, dst_width: int, dst_height: int):
        if dst_width < 0 or dst_height < 0:
            raise ValueError("Destination width/height should be > 0")

        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height

        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h

        self.output = self.output_img = (
            np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255
        )

    def process(self):
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x: int) -> int:
        """Get parent X coordinate for destination X"""
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        """Get parent Y coordinate for destination Y"""
        return int(self.ratio_y * y)


if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread("image_data/lena.jpg", 1)
    n = NearestNeighbour(im, dst_w, dst_h)
    n.process()

    imshow(
        f"Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}", n.output
    )
    waitKey(0)
    destroyAllWindows()
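
# Added worked mapping (not in the original file): resizing a 400x300 source
# to 800x600 gives ratio_x = 400/800 = 0.5 and ratio_y = 300/600 = 0.5, so
# destination pixel (i=599, j=799) samples source pixel
# (int(0.5 * 599), int(0.5 * 799)) = (299, 399), the bottom-right corner.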
| 92
| 1
|
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential

if __name__ == "__main__":
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20

    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []

    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])

    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])

    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")
    history = model.fit(
        x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
    )
    pred = model.predict(x_test)
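
    # Added follow-up (not in the original script): report the multi-step
    # forecast error on the held-out windows, in MinMax-scaled units.
    rmse = np.sqrt(np.mean((pred - y_test) ** 2))
    print(f"Test RMSE (scaled units): {rmse:.4f}")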
| 711
|
import argparse
import shlex

import runhouse as rh

if __name__ == "__main__":
    # Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
    # setup instructions, if using on-demand hardware
    # If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
    # If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
    # Throw an error if user passes both BYO and on-demand cluster args
    # Otherwise, use default values
    parser = argparse.ArgumentParser()
    parser.add_argument("--user", type=str, default="ubuntu")
    parser.add_argument("--host", type=str, default="localhost")
    parser.add_argument("--key_path", type=str, default=None)
    parser.add_argument("--instance", type=str, default="V100:1")
    parser.add_argument("--provider", type=str, default="cheapest")
    parser.add_argument("--use_spot", type=bool, default=False)
    parser.add_argument("--example", type=str, default="pytorch/text-generation/run_generation.py")
    args, unknown = parser.parse_known_args()
    if args.host != "localhost":
        if args.instance != "V100:1" or args.provider != "cheapest":
            raise ValueError("Cannot specify both BYO and on-demand cluster args")
        cluster = rh.cluster(
            name="rh-cluster", ips=[args.host], ssh_creds={"ssh_user": args.user, "ssh_private_key": args.key_path}
        )
    else:
        cluster = rh.cluster(
            name="rh-cluster", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
        )
    example_dir = args.example.rsplit("/", 1)[0]

    # Set up remote environment
    cluster.install_packages(["pip:./"])  # Installs transformers from local source
    # Note transformers is copied into the home directory on the remote machine, so we can install from there
    cluster.run([f"pip install -r transformers/examples/{example_dir}/requirements.txt"])
    cluster.run(["pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"])

    # Run example. You can bypass the CLI wrapper and paste your own code here.
    cluster.run([f"python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}"])

    # Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
    # from my_script... import train
    # reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
    # launch_train_gpu = rh.function(fn=train,
    #                                system=gpu,
    #                                reqs=reqs,
    #                                name='train_bert_glue')
    #
    # We can pass in arguments just like we would to a function:
    # launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
    #                  stream_logs=True)
| 246
| 0
|