| code (string, 81-54k chars) | code_codestyle (int64, 0-721) | style_context (string, 91-41.9k chars) | style_context_codestyle (int64, 0-699) | label (int64, 0-1) |
|---|---|---|---|---|
'''simple docstring'''
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json",
    # See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}


class Wav2Vec2Config(PretrainedConfig):
    model_type = "wav2vec2"

    def __init__(self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, feat_quantizer_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_norm="group", feat_extract_activation="gelu", conv_dim=(512, 512, 512, 512, 512, 512, 512), conv_stride=(5, 2, 2, 2, 2, 2, 2), conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, do_stable_layer_norm=False, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, num_codevectors_per_group=320, num_codevector_groups=2, contrastive_logits_temperature=0.1, num_negatives=100, codevector_dim=256, proj_codevector_dim=256, diversity_loss_weight=0.1, ctc_loss_reduction="sum", ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, tdnn_dim=(512, 512, 512, 512, 1500), tdnn_kernel=(5, 3, 3, 1, 1), tdnn_dilation=(1, 2, 3, 1, 1), xvector_output_dim=512, pad_token_id=0, bos_token_id=1, eos_token_id=2, add_adapter=False, adapter_kernel_size=3, adapter_stride=2, num_adapter_layers=3, output_hidden_size=None, adapter_attn_dim=None, **kwargs):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        self.adapter_attn_dim = adapter_attn_dim

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
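# Illustrative usage (ours, not part of the configuration module): with the
# defaults above, the derived attributes come out as
#     Wav2Vec2Config().num_feat_extract_layers  -> 7
#     Wav2Vec2Config().inputs_to_logits_ratio   -> 320  (5 * 2**6, the product of conv_stride)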
'''simple docstring'''
import argparse


CUSTOM_JS_FILE = "docs/source/_static/js/custom.js"


def update_custom_js(version):
    """Update the hard-coded version table in the doc's custom.js file."""
    with open(CUSTOM_JS_FILE, encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    index = 0

    # First let's put the right version
    while not lines[index].startswith("const stableVersion ="):
        index += 1
    lines[index] = f'const stableVersion = "v{version}"\n'

    # Then update the dictionary
    while not lines[index].startswith("const versionMapping = {"):
        index += 1

    # We go until the end
    while not lines[index].startswith("}"):
        index += 1

    # We add the new version at the end
    lines[index - 1] += f'    "v{version}": "v{version}",\n'

    with open(CUSTOM_JS_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", help="Release version.")
    args = parser.parse_args()
    update_custom_js(args.version)
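# Illustrative invocation (ours; the module filename below is a placeholder):
#
#     python update_custom_js.py --version 4.30.0
#
# This rewrites the `const stableVersion = ...` line and appends a
# `"v4.30.0": "v4.30.0"` entry to the `versionMapping` table in
# docs/source/_static/js/custom.js.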
'''simple docstring'''
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
INIT_COMMON = [
# tf -> hf
("/", "."),
("layer_", "layers."),
("kernel", "weight"),
("beta", "bias"),
("gamma", "weight"),
("pegasus", "model"),
]
END_COMMON = [
(".output.dense", ".fc2"),
("intermediate.LayerNorm", "final_layer_norm"),
("intermediate.dense", "fc1"),
]
DECODER_PATTERNS = (
INIT_COMMON
+ [
("attention.self.LayerNorm", "self_attn_layer_norm"),
("attention.output.dense", "self_attn.out_proj"),
("attention.self", "self_attn"),
("attention.encdec.LayerNorm", "encoder_attn_layer_norm"),
("attention.encdec_output.dense", "encoder_attn.out_proj"),
("attention.encdec", "encoder_attn"),
("key", "k_proj"),
("value", "v_proj"),
("query", "q_proj"),
("decoder.LayerNorm", "decoder.layernorm_embedding"),
]
+ END_COMMON
)
REMAINING_PATTERNS = (
INIT_COMMON
+ [
("embeddings.word_embeddings", "shared.weight"),
("embeddings.position_embeddings", "embed_positions.weight"),
("attention.self.LayerNorm", "self_attn_layer_norm"),
("attention.output.dense", "self_attn.output"),
("attention.self", "self_attn.self"),
("encoder.LayerNorm", "encoder.layernorm_embedding"),
]
+ END_COMMON
)
KEYS_TO_IGNORE = [
"encdec/key/bias",
"encdec/query/bias",
"encdec/value/bias",
"self/key/bias",
"self/query/bias",
"self/value/bias",
"encdec_output/dense/bias",
"attention/output/dense/bias",
]
def rename_state_dict_key(k, patterns):
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k


def convert_bigbird_pegasus(tf_weights: dict, config_update: dict) -> BigBirdPegasusForConditionalGeneration:
    cfg = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(cfg)
    state_dict = torch_model.state_dict()
    mapping = {}

    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder")}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder")}

    for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    mapping["model.encoder.embed_positions.weight"] = mapping["model.embed_positions.weight"]
    mapping["model.decoder.embed_positions.weight"] = mapping.pop("model.embed_positions.weight")
    missing, extra = torch_model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            "final_logits_bias",
            "model.encoder.embed_tokens.weight",
            "model.decoder.embed_tokens.weight",
            "lm_head.weight",
        ]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model


def get_tf_weights_as_numpy(path) -> Dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights


def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str, config_update: dict):
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights, config_update)
    torch_model.save_pretrained(save_dir)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("--save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    config_update = {}
    convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
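# Illustrative invocation (ours; the script name and paths are placeholders,
# not taken from the file above):
#
#     python convert_bigbird_pegasus_tf_to_pytorch.py \
#         --tf_ckpt_path /path/to/bigbird_pegasus_tf_ckpt \
#         --save_dir ./bigbird-pegasus-converted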
'''simple docstring'''
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
def __init__( self : Tuple , lowercase_ : Tuple , lowercase_ : Any=3 , lowercase_ : int=32 , lowercase_ : str=3 , lowercase_ : int=10 , lowercase_ : Any=[8, 16, 32, 64] , lowercase_ : Tuple=[1, 1, 2, 1] , lowercase_ : Any=True , lowercase_ : int=True , lowercase_ : Any="relu" , lowercase_ : List[Any]=3 , lowercase_ : Tuple=None , lowercase_ : Union[str, Any]=["stage2", "stage3", "stage4"] , lowercase_ : Optional[int]=[2, 3, 4] , lowercase_ : List[str]=1 , ):
lowercase_ : Any = parent
lowercase_ : str = batch_size
lowercase_ : Any = image_size
lowercase_ : Optional[Any] = num_channels
lowercase_ : Any = embeddings_size
lowercase_ : Union[str, Any] = hidden_sizes
lowercase_ : Any = depths
lowercase_ : Dict = is_training
lowercase_ : Tuple = use_labels
lowercase_ : str = hidden_act
lowercase_ : Optional[Any] = num_labels
lowercase_ : Tuple = scope
lowercase_ : Any = len(lowercase_ )
lowercase_ : Optional[Any] = out_features
lowercase_ : Tuple = out_indices
lowercase_ : str = num_groups
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
lowercase_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase_ : List[Any] = None
if self.use_labels:
lowercase_ : List[str] = ids_tensor([self.batch_size] , self.num_labels )
lowercase_ : int = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : int , lowercase_ : List[str] , lowercase_ : List[str] ):
lowercase_ : Optional[int] = BitModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ : List[Any] = model(lowercase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : Tuple , lowercase_ : Optional[int] , lowercase_ : List[Any] ):
lowercase_ : Union[str, Any] = self.num_labels
lowercase_ : Tuple = BitForImageClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ : Any = model(lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : List[Any] , lowercase_ : Optional[int] ):
lowercase_ : Any = BitBackbone(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ : Dict = model(lowercase_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
lowercase_ : List[str] = None
lowercase_ : Dict = BitBackbone(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ : Tuple = model(lowercase_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ : Optional[int] = self.prepare_config_and_inputs()
lowercase_ , lowercase_ , lowercase_ : Optional[Any] = config_and_inputs
lowercase_ : Any = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
UpperCamelCase__ = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
UpperCamelCase__ = (
{'''feature-extraction''': BitModel, '''image-classification''': BitForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
def SCREAMING_SNAKE_CASE_ ( self : str ):
lowercase_ : int = BitModelTester(self )
lowercase_ : Optional[int] = ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
return
@unittest.skip(reason="""Bit does not output attentions""" )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
pass
@unittest.skip(reason="""Bit does not use inputs_embeds""" )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
pass
@unittest.skip(reason="""Bit does not support input and output embeddings""" )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
pass
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
lowercase_ , lowercase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : Optional[Any] = model_class(lowercase_ )
lowercase_ : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ : Union[str, Any] = [*signature.parameters.keys()]
lowercase_ : Optional[int] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
lowercase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ , lowercase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : List[Any] = model_class(config=lowercase_ )
for name, module in model.named_modules():
if isinstance(module , (nn.BatchNorm2d, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
def check_hidden_states_output(lowercase_ : Optional[int] , lowercase_ : int , lowercase_ : int ):
lowercase_ : Optional[Any] = model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
with torch.no_grad():
lowercase_ : List[Any] = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
lowercase_ : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowercase_ : Optional[int] = self.model_tester.num_stages
self.assertEqual(len(lowercase_ ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowercase_ , lowercase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ : Dict = ["""preactivation""", """bottleneck"""]
for model_class in self.all_model_classes:
for layer_type in layers_type:
lowercase_ : Union[str, Any] = layer_type
lowercase_ : Optional[Any] = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase_ : Union[str, Any] = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
@unittest.skip(reason="""Bit does not use feedforward chunking""" )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
pass
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
lowercase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase_ )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ : List[str] = BitModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class BitModelIntegrationTest(unittest.TestCase):
@cached_property
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def SCREAMING_SNAKE_CASE_ ( self : int ):
lowercase_ : List[str] = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(lowercase_ )
lowercase_ : int = self.default_image_processor
lowercase_ : List[Any] = prepare_img()
lowercase_ : Dict = image_processor(images=lowercase_ , return_tensors="""pt""" ).to(lowercase_ )
# forward pass
with torch.no_grad():
lowercase_ : str = model(**lowercase_ )
# verify the logits
lowercase_ : Optional[int] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowercase_ )
lowercase_ : Union[str, Any] = torch.tensor([[-0.65_26, -0.52_63, -1.43_98]] ).to(lowercase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase_ , atol=1E-4 ) )
@require_torch
class BitBackboneTest(BackboneTesterMixin, unittest.TestCase):
UpperCamelCase__ = (BitBackbone,) if is_torch_available() else ()
UpperCamelCase__ = BitConfig
UpperCamelCase__ = False
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
lowercase_ : Union[str, Any] = BitModelTester(self )
'''simple docstring'''
from collections.abc import Callable

import numpy as np


def euler_modified(ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float) -> np.ndarray:
    """Solve dy/dx = ode_func(x, y) with the explicit midpoint (modified Euler / Heun) method."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        y_predict = y[k] + step_size * ode_func(x, y[k])
        y[k + 1] = y[k] + (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        x += step_size
    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
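
# Minimal usage sketch (ours, not part of the original module): integrate
# dy/dx = y with y(0) = 1 up to x = 1 and compare against e. The step size and
# tolerance below are illustrative choices, not requirements of the method.
if __name__ == "__main__":
    approx = euler_modified(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
    assert abs(approx[-1] - np.exp(1.0)) < 1e-3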
'''simple docstring'''
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
_lowercase : Optional[Any] = [
"Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the"
" final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe"
" depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.",
"The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal"
" accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's"
" founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the"
" body.",
"Amnesty International releases its annual report on the death penalty. The report catalogs the use of"
" state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the"
" world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital"
" punishment.",
]
_lowercase : List[Any] = [
"Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ."
" Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz"
" had informed his Lufthansa training school of an episode of severe depression, airline says .",
"Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ."
" Israel and the United States opposed the move, which could open the door to war crimes investigations against"
" Israelis .",
"Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to"
" death . Organization claims that governments around the world are using the threat of terrorism to advance"
" executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death"
" sentences up by 28% .",
]
def lowerCamelCase ( ) -> List[str]:
lowercase_ : str = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , bootstrap_aggregation=UpperCAmelCase__ , rouge_keys=["""rouge2""", """rougeL"""] )
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase_ : int = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , bootstrap_aggregation=UpperCAmelCase__ , rouge_keys=["""rouge2"""] )
assert (
pd.DataFrame(no_aggregation["""rouge2"""] ).fmeasure.mean()
== pd.DataFrame(no_aggregation_just_ra["""rouge2"""] ).fmeasure.mean()
)
def lowerCamelCase ( ) -> Optional[Any]:
lowercase_ : Tuple = """rougeLsum"""
lowercase_ : Optional[Any] = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=[k] )[k]
lowercase_ : Optional[Any] = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=[k] )[k]
assert score > score_no_sep
def lowerCamelCase ( ) -> List[Any]:
lowercase_ : Optional[int] = ["""rouge1""", """rouge2""", """rougeL"""]
lowercase_ : Tuple = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=UpperCAmelCase__ )
lowercase_ : Tuple = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=UpperCAmelCase__ )
assert score_sep == score_no_sep
def lowerCamelCase ( ) -> Optional[Any]:
lowercase_ : Union[str, Any] = [
"""Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.""",
"""Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .""",
]
lowercase_ : List[str] = [
"""Margot Frank, died in 1945, a month earlier than previously thought.""",
"""Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"""
""" the final seconds on board Flight 9525.""",
]
assert calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ ) == calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ )
def lowerCamelCase ( ) -> Union[str, Any]:
lowercase_ : Optional[Any] = [
"""\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" """
]
lowercase_ : List[Any] = [
""" Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."""
]
lowercase_ : Optional[int] = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , rouge_keys=["""rougeLsum"""] , newline_sep=UpperCAmelCase__ )["""rougeLsum"""]
lowercase_ : List[str] = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , rouge_keys=["""rougeLsum"""] )["""rougeLsum"""]
assert new_score > prev_score
def lowerCamelCase ( ) -> Tuple:
lowercase_ : Optional[int] = Path("""examples/seq2seq/test_data/wmt_en_ro""" )
lowercase_ : List[Any] = calculate_rouge_path(data_dir.joinpath("""test.source""" ) , data_dir.joinpath("""test.target""" ) )
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase_ : Union[str, Any] = calculate_rouge_path(
data_dir.joinpath("""test.source""" ) , data_dir.joinpath("""test.target""" ) , bootstrap_aggregation=UpperCAmelCase__ )
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
'''simple docstring'''
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPT2Tokenizer,
GPT2TokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class AutoTokenizerTest(unittest.TestCase):
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
lowercase_ : Any = 0
@slow
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
lowercase_ : Tuple = AutoTokenizer.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
self.assertIsInstance(lowercase_ , (BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(lowercase_ ) , 0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
lowercase_ : str = AutoTokenizer.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
self.assertIsInstance(lowercase_ , (GPT2Tokenizer, GPT2TokenizerFast) )
self.assertGreater(len(lowercase_ ) , 0 )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
lowercase_ : Union[str, Any] = AutoTokenizer.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
lowercase_ : int = AutoTokenizer.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , (RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 20 )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
lowercase_ : List[Any] = AutoConfig.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
# Check that tokenizer_type ≠ model_type
lowercase_ : Union[str, Any] = AutoTokenizer.from_pretrained(lowercase_ , config=lowercase_ )
self.assertIsInstance(lowercase_ , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(lowercase_ , """vocab.txt""" ) )
lowercase_ : Dict = AutoTokenizer.from_pretrained(lowercase_ , tokenizer_type="""bert""" , use_fast=lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(lowercase_ , """vocab.json""" ) )
shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(lowercase_ , """merges.txt""" ) )
lowercase_ : Union[str, Any] = AutoTokenizer.from_pretrained(lowercase_ , tokenizer_type="""gpt2""" , use_fast=lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
@require_tokenizers
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(lowercase_ , """vocab.txt""" ) )
lowercase_ : Union[str, Any] = AutoTokenizer.from_pretrained(lowercase_ , tokenizer_type="""bert""" )
self.assertIsInstance(lowercase_ , lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(lowercase_ , """vocab.json""" ) )
shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(lowercase_ , """merges.txt""" ) )
lowercase_ : Tuple = AutoTokenizer.from_pretrained(lowercase_ , tokenizer_type="""gpt2""" )
self.assertIsInstance(lowercase_ , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
with pytest.raises(lowercase_ ):
AutoTokenizer.from_pretrained("""./""" , tokenizer_type="""xxx""" )
@require_tokenizers
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
lowercase_ : Dict = tokenizer_class.from_pretrained("""wietsedv/bert-base-dutch-cased""" )
self.assertIsInstance(lowercase_ , (BertTokenizer, BertTokenizerFast) )
if isinstance(lowercase_ , lowercase_ ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , lowercase_ )
else:
self.assertEqual(tokenizer.do_lower_case , lowercase_ )
self.assertEqual(tokenizer.model_max_length , 512 )
@require_tokenizers
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
lowercase_ , """julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier""" , ):
lowercase_ : Union[str, Any] = tokenizer_class.from_pretrained("""julien-c/herlolip-not-exists""" )
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
# tests: https://github.com/huggingface/transformers/pull/13251
# 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
# 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
lowercase_ : Union[str, Any] = TOKENIZER_MAPPING.values()
lowercase_ : Union[str, Any] = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(lowercase_ )
@require_tokenizers
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=lowercase_ ) , lowercase_ )
self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" ) , lowercase_ )
@require_tokenizers
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
lowercase_ : List[str] = AutoTokenizer.from_pretrained("""distilbert-base-uncased""" , do_lower_case=lowercase_ )
lowercase_ : Union[str, Any] = """Hello, world. How are you?"""
lowercase_ : Optional[Any] = tokenizer.tokenize(lowercase_ )
self.assertEqual("""[UNK]""" , tokens[0] )
lowercase_ : Optional[int] = AutoTokenizer.from_pretrained("""microsoft/mpnet-base""" , do_lower_case=lowercase_ )
lowercase_ : Optional[int] = tokenizer.tokenize(lowercase_ )
self.assertEqual("""[UNK]""" , tokens[0] )
@require_tokenizers
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
lowercase_ : List[str] = AutoTokenizer.from_pretrained("""robot-test/dummy-tokenizer-fast-with-model-config""" )
self.assertEqual(type(lowercase_ ) , lowercase_ )
self.assertEqual(tokenizer.model_max_length , 512 )
self.assertEqual(tokenizer.vocab_size , 30000 )
self.assertEqual(tokenizer.unk_token , """[UNK]""" )
self.assertEqual(tokenizer.padding_side , """right""" )
self.assertEqual(tokenizer.truncation_side , """right""" )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
lowercase_ : Tuple = AutoTokenizer.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase_ )
lowercase_ : Union[str, Any] = AutoTokenizer.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 12 )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
lowercase_ : int = AutoTokenizer.from_pretrained("""ctrl""" )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(lowercase_ , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
# Check we can load the tokenizer config of an online model.
lowercase_ : str = get_tokenizer_config("""bert-base-cased""" )
lowercase_ : Optional[Any] = config.pop("""_commit_hash""" , lowercase_ )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(lowercase_ , {"""do_lower_case""": False} )
# This model does not have a tokenizer_config so we get back an empty dict.
lowercase_ : Optional[Any] = get_tokenizer_config(lowercase_ )
self.assertDictEqual(lowercase_ , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
lowercase_ : Optional[int] = AutoTokenizer.from_pretrained(lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase_ )
lowercase_ : Tuple = get_tokenizer_config(lowercase_ )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config["""tokenizer_class"""] , """BertTokenizer""" )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
try:
AutoConfig.register("""custom""" , lowercase_ )
AutoTokenizer.register(lowercase_ , slow_tokenizer_class=lowercase_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase_ ):
AutoTokenizer.register(lowercase_ , slow_tokenizer_class=lowercase_ )
lowercase_ : int = CustomTokenizer.from_pretrained(lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase_ )
lowercase_ : str = AutoTokenizer.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
try:
AutoConfig.register("""custom""" , lowercase_ )
# Can register in two steps
AutoTokenizer.register(lowercase_ , slow_tokenizer_class=lowercase_ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(lowercase_ , fast_tokenizer_class=lowercase_ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
lowercase_ , slow_tokenizer_class=lowercase_ , fast_tokenizer_class=lowercase_ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase_ ):
AutoTokenizer.register(lowercase_ , fast_tokenizer_class=lowercase_ )
# We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer
# and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase_ : Union[str, Any] = BertTokenizerFast.from_pretrained(lowercase_ )
bert_tokenizer.save_pretrained(lowercase_ )
lowercase_ : Dict = CustomTokenizerFast.from_pretrained(lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase_ )
lowercase_ : int = AutoTokenizer.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
lowercase_ : List[str] = AutoTokenizer.from_pretrained(lowercase_ , use_fast=lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def SCREAMING_SNAKE_CASE_ ( self : int ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(lowercase_ ):
lowercase_ : int = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowercase_ ):
lowercase_ : Dict = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase_ )
lowercase_ : Any = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase_ )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase_ )
lowercase_ : List[Any] = AutoTokenizer.from_pretrained(lowercase_ , trust_remote_code=lowercase_ )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
lowercase_ : List[Any] = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase_ , use_fast=lowercase_ )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase_ )
lowercase_ : Optional[Any] = AutoTokenizer.from_pretrained(lowercase_ , trust_remote_code=lowercase_ , use_fast=lowercase_ )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" )
@require_tokenizers
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = False
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = NewTokenizer
UpperCamelCase__ = False
try:
AutoConfig.register("""custom""" , lowercase_ )
AutoTokenizer.register(lowercase_ , slow_tokenizer_class=lowercase_ )
AutoTokenizer.register(lowercase_ , fast_tokenizer_class=lowercase_ )
# If remote code is not set, the default is to use local
lowercase_ : Dict = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertFalse(tokenizer.special_attribute_present )
lowercase_ : str = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , use_fast=lowercase_ )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
lowercase_ : Optional[int] = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase_ )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertFalse(tokenizer.special_attribute_present )
lowercase_ : List[Any] = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase_ , use_fast=lowercase_ )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
lowercase_ : Union[str, Any] = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase_ )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertTrue(tokenizer.special_attribute_present )
lowercase_ : Tuple = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase_ , use_fast=lowercase_ )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def SCREAMING_SNAKE_CASE_ ( self : str ):
lowercase_ : Optional[int] = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=lowercase_ )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
lowercase_ : Any = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=lowercase_ , use_fast=lowercase_ )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
with self.assertRaisesRegex(
lowercase_ , """bert-base is not a local folder and is not a valid model identifier""" ):
lowercase_ : Optional[int] = AutoTokenizer.from_pretrained("""bert-base""" )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
with self.assertRaisesRegex(
lowercase_ , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
lowercase_ : List[str] = AutoTokenizer.from_pretrained(lowercase_ , revision="""aaaaaa""" )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
# Make sure we have cached the tokenizer.
lowercase_ : Optional[int] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
with RequestCounter() as counter:
lowercase_ : Dict = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/s2t-small-librispeech-asr": (
        "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}


class Speech2TextConfig(PretrainedConfig):
    model_type = "speech_to_text"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(self, vocab_size=10000, encoder_layers=12, encoder_ffn_dim=2048, encoder_attention_heads=4, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=4, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2, scale_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, max_source_positions=6000, max_target_positions=1024, num_conv_layers=2, conv_kernel_sizes=(5, 5), conv_channels=1024, input_feat_per_channel=80, input_channels=1, **kwargs):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        self.num_conv_layers = num_conv_layers
        self.conv_kernel_sizes = list(conv_kernel_sizes)
        self.conv_channels = conv_channels
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels

        if len(self.conv_kernel_sizes) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`."
            )

        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, **kwargs)
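# Illustrative usage (ours, not part of the original file): the defaults above
# can be instantiated directly, and `attribute_map` forwards the generic names:
#     config = Speech2TextConfig()
#     assert config.hidden_size == config.d_model == 256
#     assert config.num_attention_heads == config.encoder_attention_heads == 4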
'''simple docstring'''
def solution() -> str:
    """Return the last ten digits of the series 1^1 + 2^2 + ... + 1000^1000 (Project Euler problem 48)."""
    total = 0
    for i in range(1, 1001):
        total += i**i
    return str(total)[-10:]


if __name__ == "__main__":
    print(solution())
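# Hedged cross-check (ours, not part of the original solution): the same last
# ten digits can be obtained without large integers by summing modulo 10**10:
#     mod = 10**10
#     assert solution() == str(sum(pow(i, i, mod) for i in range(1, 1001)) % mod).zfill(10)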
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class TimesformerModelTester:
def __init__( self : int , lowercase_ : Optional[Any] , lowercase_ : List[Any]=13 , lowercase_ : List[str]=10 , lowercase_ : Union[str, Any]=3 , lowercase_ : str=2 , lowercase_ : Optional[Any]=2 , lowercase_ : int=True , lowercase_ : List[Any]=True , lowercase_ : Union[str, Any]=32 , lowercase_ : Union[str, Any]=5 , lowercase_ : str=4 , lowercase_ : Dict=37 , lowercase_ : Tuple="gelu" , lowercase_ : int=0.1 , lowercase_ : Optional[Any]=0.1 , lowercase_ : Any=10 , lowercase_ : Tuple=0.02 , lowercase_ : Any="divided_space_time" , lowercase_ : Tuple=None , ):
lowercase_ : int = parent
lowercase_ : str = batch_size
lowercase_ : List[str] = image_size
lowercase_ : str = num_channels
lowercase_ : List[Any] = patch_size
lowercase_ : Optional[Any] = num_frames
lowercase_ : Dict = is_training
lowercase_ : int = use_labels
lowercase_ : List[str] = hidden_size
lowercase_ : Dict = num_hidden_layers
lowercase_ : Dict = num_attention_heads
lowercase_ : Any = intermediate_size
lowercase_ : Optional[int] = hidden_act
lowercase_ : Optional[Any] = hidden_dropout_prob
lowercase_ : List[Any] = attention_probs_dropout_prob
lowercase_ : Any = attention_type
lowercase_ : Union[str, Any] = initializer_range
lowercase_ : List[str] = scope
lowercase_ : Optional[int] = num_labels
# in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
lowercase_ : Dict = (image_size // patch_size) ** 2
lowercase_ : List[Any] = (num_frames) * self.num_patches_per_frame + 1
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
lowercase_ : Optional[Any] = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
lowercase_ : int = None
if self.use_labels:
lowercase_ : Optional[int] = ids_tensor([self.batch_size] , self.num_labels )
lowercase_ : Optional[Any] = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
lowercase_ : int = TimesformerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
lowercase_ : Any = self.num_labels
return config
def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : List[Any] , lowercase_ : Dict , lowercase_ : List[str] ):
lowercase_ : Optional[Any] = TimesformerModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ : int = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Optional[int] , lowercase_ : List[str] , lowercase_ : str ):
lowercase_ : Dict = TimesformerForVideoClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ : int = model(lowercase_ )
# verify the logits shape
lowercase_ : List[Any] = torch.Size((self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
lowercase_ : List[str] = self.prepare_config_and_inputs()
lowercase_ , lowercase_ , lowercase_ : int = config_and_inputs
lowercase_ : List[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class TimesformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
UpperCamelCase__ = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
UpperCamelCase__ = (
{'''feature-extraction''': TimesformerModel, '''video-classification''': TimesformerForVideoClassification}
if is_torch_available()
else {}
)
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
lowercase_ : Any = TimesformerModelTester(self )
lowercase_ : Union[str, Any] = ConfigTester(
self , config_class=lowercase_ , has_text_modality=lowercase_ , hidden_size=37 )
def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : Any , lowercase_ : List[str] , lowercase_ : Tuple=False ):
lowercase_ : List[Any] = copy.deepcopy(lowercase_ )
if return_labels:
if model_class in get_values(lowercase_ ):
lowercase_ : List[str] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase_ )
return inputs_dict
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""TimeSformer does not use inputs_embeds""" )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
pass
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
lowercase_ , lowercase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : str = model_class(lowercase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase_ : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase_ , nn.Linear ) )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ , lowercase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : Dict = model_class(lowercase_ )
lowercase_ : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ : Union[str, Any] = [*signature.parameters.keys()]
lowercase_ : str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : str ):
lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : str ):
lowercase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*lowercase_ )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Any ):
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ : Any = TimesformerModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : str ):
if not self.has_attentions:
pass
else:
lowercase_ , lowercase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ : List[str] = True
for model_class in self.all_model_classes:
lowercase_ : str = self.model_tester.seq_length
lowercase_ : int = self.model_tester.num_frames
lowercase_ : int = True
lowercase_ : Any = False
lowercase_ : str = True
lowercase_ : int = model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
with torch.no_grad():
lowercase_ : List[Any] = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
lowercase_ : List[str] = outputs.attentions
self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowercase_ : List[str] = True
lowercase_ : str = model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
with torch.no_grad():
lowercase_ : Dict = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
lowercase_ : int = outputs.attentions
self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
lowercase_ : Optional[Any] = len(lowercase_ )
# Check attention is always last and order is fine
lowercase_ : Tuple = True
lowercase_ : Dict = True
lowercase_ : str = model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
with torch.no_grad():
lowercase_ : str = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
self.assertEqual(out_len + 1 , len(lowercase_ ) )
lowercase_ : Optional[Any] = outputs.attentions
self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
def check_hidden_states_output(lowercase_ : Union[str, Any] , lowercase_ : Optional[int] , lowercase_ : Dict ):
lowercase_ : List[str] = model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
with torch.no_grad():
lowercase_ : Optional[Any] = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
lowercase_ : Dict = outputs.hidden_states
lowercase_ : List[Any] = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(lowercase_ ) , lowercase_ )
lowercase_ : List[Any] = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
lowercase_ , lowercase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : List[str] = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase_ : Optional[int] = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
def lowerCamelCase ( ) -> Optional[int]:
lowercase_ : List[str] = hf_hub_download(
repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" )
lowercase_ : List[Any] = np.load(UpperCAmelCase__ )
return list(UpperCAmelCase__ )
@require_torch
@require_vision
class __magic_name__ ( unittest.TestCase):
@cached_property
def SCREAMING_SNAKE_CASE_ ( self : str ):
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
lowercase_ : Any = TimesformerForVideoClassification.from_pretrained("""facebook/timesformer-base-finetuned-k400""" ).to(
lowercase_ )
lowercase_ : Optional[Any] = self.default_image_processor
lowercase_ : Any = prepare_video()
lowercase_ : Optional[int] = image_processor(video[:8] , return_tensors="""pt""" ).to(lowercase_ )
# forward pass
with torch.no_grad():
lowercase_ : Optional[Any] = model(**lowercase_ )
# verify the logits
lowercase_ : Any = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape , lowercase_ )
lowercase_ : int = torch.tensor([-0.30_16, -0.77_13, -0.42_05] ).to(lowercase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase_ , atol=1E-4 ) )
| 30
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_lowercase : str = {
"configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Dict = [
"ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST",
"ErnieForCausalLM",
"ErnieForMaskedLM",
"ErnieForMultipleChoice",
"ErnieForNextSentencePrediction",
"ErnieForPreTraining",
"ErnieForQuestionAnswering",
"ErnieForSequenceClassification",
"ErnieForTokenClassification",
"ErnieModel",
"ErniePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
_lowercase : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 30
|
'''simple docstring'''
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
_lowercase : Tuple = logging.get_logger(__name__)
# General docstring
_lowercase : List[str] = "RegNetConfig"
# Base docstring
_lowercase : Dict = "facebook/regnet-y-040"
_lowercase : Union[str, Any] = [1, 1088, 7, 7]
# Image classification docstring
_lowercase : Optional[Any] = "facebook/regnet-y-040"
_lowercase : Union[str, Any] = "tabby, tabby cat"
_lowercase : str = [
"facebook/regnet-y-040",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class __magic_name__ ( nn.Module):
def __init__( self : Union[str, Any] , lowercase_ : int , lowercase_ : int , lowercase_ : int = 3 , lowercase_ : int = 1 , lowercase_ : int = 1 , lowercase_ : Optional[str] = "relu" , ):
super().__init__()
lowercase_ : List[Any] = nn.Convad(
lowercase_ , lowercase_ , kernel_size=lowercase_ , stride=lowercase_ , padding=kernel_size // 2 , groups=lowercase_ , bias=lowercase_ , )
lowercase_ : str = nn.BatchNormad(lowercase_ )
lowercase_ : Optional[int] = ACTaFN[activation] if activation is not None else nn.Identity()
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : List[str] ):
lowercase_ : Dict = self.convolution(lowercase_ )
lowercase_ : str = self.normalization(lowercase_ )
lowercase_ : Optional[Any] = self.activation(lowercase_ )
return hidden_state
class __magic_name__ ( nn.Module):
def __init__( self : List[Any] , lowercase_ : RegNetConfig ):
super().__init__()
lowercase_ : str = RegNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act )
lowercase_ : Any = config.num_channels
def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : Optional[Any] ):
lowercase_ : List[str] = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
"""Make sure that the channel dimension of the pixel values match with the one set in the configuration.""" )
lowercase_ : Any = self.embedder(lowercase_ )
return hidden_state
class __magic_name__ ( nn.Module):
def __init__( self : Optional[int] , lowercase_ : int , lowercase_ : int , lowercase_ : int = 2 ):
super().__init__()
lowercase_ : Optional[Any] = nn.Convad(lowercase_ , lowercase_ , kernel_size=1 , stride=lowercase_ , bias=lowercase_ )
lowercase_ : Union[str, Any] = nn.BatchNormad(lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : Tensor ):
lowercase_ : Tuple = self.convolution(lowercase_ )
lowercase_ : str = self.normalization(lowercase_ )
return hidden_state
class __magic_name__ ( nn.Module):
def __init__( self : str , lowercase_ : int , lowercase_ : int ):
super().__init__()
lowercase_ : int = nn.AdaptiveAvgPoolad((1, 1) )
lowercase_ : int = nn.Sequential(
nn.Convad(lowercase_ , lowercase_ , kernel_size=1 ) , nn.ReLU() , nn.Convad(lowercase_ , lowercase_ , kernel_size=1 ) , nn.Sigmoid() , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : Any ):
# b c h w -> b c 1 1
lowercase_ : List[str] = self.pooler(lowercase_ )
lowercase_ : Optional[int] = self.attention(lowercase_ )
lowercase_ : Any = hidden_state * attention
return hidden_state
class __magic_name__ ( nn.Module):
def __init__( self : Optional[int] , lowercase_ : RegNetConfig , lowercase_ : int , lowercase_ : int , lowercase_ : int = 1 ):
super().__init__()
lowercase_ : List[Any] = in_channels != out_channels or stride != 1
lowercase_ : Optional[int] = max(1 , out_channels // config.groups_width )
lowercase_ : Dict = (
RegNetShortCut(lowercase_ , lowercase_ , stride=lowercase_ ) if should_apply_shortcut else nn.Identity()
)
lowercase_ : List[Any] = nn.Sequential(
RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(lowercase_ , lowercase_ , stride=lowercase_ , groups=lowercase_ , activation=config.hidden_act ) , RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=lowercase_ ) , )
lowercase_ : int = ACTaFN[config.hidden_act]
def SCREAMING_SNAKE_CASE_ ( self : Any , lowercase_ : Any ):
lowercase_ : Any = hidden_state
lowercase_ : Union[str, Any] = self.layer(lowercase_ )
lowercase_ : Union[str, Any] = self.shortcut(lowercase_ )
hidden_state += residual
lowercase_ : str = self.activation(lowercase_ )
return hidden_state
class __magic_name__ ( nn.Module):
def __init__( self : Optional[Any] , lowercase_ : RegNetConfig , lowercase_ : int , lowercase_ : int , lowercase_ : int = 1 ):
super().__init__()
lowercase_ : str = in_channels != out_channels or stride != 1
lowercase_ : int = max(1 , out_channels // config.groups_width )
lowercase_ : int = (
RegNetShortCut(lowercase_ , lowercase_ , stride=lowercase_ ) if should_apply_shortcut else nn.Identity()
)
lowercase_ : Union[str, Any] = nn.Sequential(
RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(lowercase_ , lowercase_ , stride=lowercase_ , groups=lowercase_ , activation=config.hidden_act ) , RegNetSELayer(lowercase_ , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=lowercase_ ) , )
lowercase_ : Optional[int] = ACTaFN[config.hidden_act]
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Union[str, Any] ):
lowercase_ : Optional[int] = hidden_state
lowercase_ : str = self.layer(lowercase_ )
lowercase_ : int = self.shortcut(lowercase_ )
hidden_state += residual
lowercase_ : Optional[Any] = self.activation(lowercase_ )
return hidden_state
class __magic_name__ ( nn.Module):
def __init__( self : str , lowercase_ : RegNetConfig , lowercase_ : int , lowercase_ : int , lowercase_ : int = 2 , lowercase_ : int = 2 , ):
super().__init__()
lowercase_ : str = RegNetXLayer if config.layer_type == """x""" else RegNetYLayer
lowercase_ : str = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(
lowercase_ , lowercase_ , lowercase_ , stride=lowercase_ , ) , *[layer(lowercase_ , lowercase_ , lowercase_ ) for _ in range(depth - 1 )] , )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : List[str] ):
lowercase_ : Tuple = self.layers(lowercase_ )
return hidden_state
class __magic_name__ ( nn.Module):
def __init__( self : Dict , lowercase_ : RegNetConfig ):
super().__init__()
lowercase_ : Optional[Any] = nn.ModuleList([] )
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
RegNetStage(
lowercase_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
lowercase_ : Optional[Any] = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(lowercase_ , config.depths[1:] ):
self.stages.append(RegNetStage(lowercase_ , lowercase_ , lowercase_ , depth=lowercase_ ) )
def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : Tensor , lowercase_ : bool = False , lowercase_ : bool = True ):
lowercase_ : Tuple = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
lowercase_ : Union[str, Any] = hidden_states + (hidden_state,)
lowercase_ : Dict = stage_module(lowercase_ )
if output_hidden_states:
lowercase_ : Optional[Any] = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=lowercase_ , hidden_states=lowercase_ )
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = RegNetConfig
UpperCamelCase__ = '''regnet'''
UpperCamelCase__ = '''pixel_values'''
UpperCamelCase__ = True
def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : Optional[Any] ):
if isinstance(lowercase_ , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode="""fan_out""" , nonlinearity="""relu""" )
elif isinstance(lowercase_ , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : Optional[int] , lowercase_ : Any=False ):
if isinstance(lowercase_ , lowercase_ ):
lowercase_ : List[str] = value
_lowercase : Dict = r"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
_lowercase : Any = r"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
'''The bare RegNet model outputting raw features without any specific head on top.''', _UpperCAmelCase, )
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class __magic_name__ ( _UpperCAmelCase):
def __init__( self : Any , lowercase_ : Any ):
super().__init__(lowercase_ )
lowercase_ : List[str] = config
lowercase_ : Union[str, Any] = RegNetEmbeddings(lowercase_ )
lowercase_ : Union[str, Any] = RegNetEncoder(lowercase_ )
lowercase_ : str = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowercase_ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowercase_ , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : Tensor , lowercase_ : Optional[bool] = None , lowercase_ : Optional[bool] = None ):
lowercase_ : List[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase_ : Tuple = return_dict if return_dict is not None else self.config.use_return_dict
lowercase_ : str = self.embedder(lowercase_ )
lowercase_ : Optional[Any] = self.encoder(
lowercase_ , output_hidden_states=lowercase_ , return_dict=lowercase_ )
lowercase_ : List[Any] = encoder_outputs[0]
lowercase_ : str = self.pooler(lowercase_ )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowercase_ , pooler_output=lowercase_ , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
'''
RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
''', _UpperCAmelCase, )
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class __magic_name__ ( _UpperCAmelCase):
def __init__( self : Dict , lowercase_ : str ):
super().__init__(lowercase_ )
lowercase_ : Any = config.num_labels
lowercase_ : List[str] = RegNetModel(lowercase_ )
# classification head
lowercase_ : Any = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowercase_ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowercase_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Optional[torch.FloatTensor] = None , lowercase_ : Optional[torch.LongTensor] = None , lowercase_ : Optional[bool] = None , lowercase_ : Optional[bool] = None , ):
lowercase_ : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict
lowercase_ : Optional[int] = self.regnet(lowercase_ , output_hidden_states=lowercase_ , return_dict=lowercase_ )
lowercase_ : Optional[int] = outputs.pooler_output if return_dict else outputs[1]
lowercase_ : List[Any] = self.classifier(lowercase_ )
lowercase_ : Optional[int] = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
lowercase_ : Optional[int] = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
lowercase_ : str = """single_label_classification"""
else:
lowercase_ : str = """multi_label_classification"""
if self.config.problem_type == "regression":
lowercase_ : str = MSELoss()
if self.num_labels == 1:
lowercase_ : List[Any] = loss_fct(logits.squeeze() , labels.squeeze() )
else:
lowercase_ : List[str] = loss_fct(lowercase_ , lowercase_ )
elif self.config.problem_type == "single_label_classification":
lowercase_ : Optional[int] = CrossEntropyLoss()
lowercase_ : Union[str, Any] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
lowercase_ : Dict = BCEWithLogitsLoss()
lowercase_ : Tuple = loss_fct(lowercase_ , lowercase_ )
if not return_dict:
lowercase_ : Tuple = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=lowercase_ , logits=lowercase_ , hidden_states=outputs.hidden_states )
| 30
| 1
|
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def lowerCamelCase ( UpperCAmelCase__ : Tuple ) -> str:
lowercase_ : int = [
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""decoder.output_projection.weight""",
"""_float_tensor""",
"""encoder.embed_positions._float_tensor""",
"""decoder.embed_positions._float_tensor""",
]
for k in ignore_keys:
state_dict.pop(UpperCAmelCase__ , UpperCAmelCase__ )
def lowerCamelCase ( UpperCAmelCase__ : Dict ) -> Tuple:
lowercase_ , lowercase_ : int = emb.weight.shape
lowercase_ : Any = nn.Linear(UpperCAmelCase__ , UpperCAmelCase__ , bias=UpperCAmelCase__ )
lowercase_ : Dict = emb.weight.data
return lin_layer
def lowerCamelCase ( UpperCAmelCase__ : List[Any] ) -> Dict:
lowercase_ : int = torch.load(UpperCAmelCase__ , map_location="""cpu""" )
lowercase_ : str = mam_aaa["""args"""] or mam_aaa["""cfg"""]["""model"""]
lowercase_ : List[Any] = mam_aaa["""model"""]
remove_ignore_keys_(UpperCAmelCase__ )
lowercase_ : Optional[Any] = state_dict["""encoder.embed_tokens.weight"""].shape[0]
lowercase_ : int = MaMaaaConfig(
vocab_size=UpperCAmelCase__ , max_position_embeddings=1024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="""relu""" , )
lowercase_ : List[str] = state_dict["""decoder.embed_tokens.weight"""]
lowercase_ : Optional[Any] = MaMaaaForConditionalGeneration(UpperCAmelCase__ )
model.model.load_state_dict(UpperCAmelCase__ , strict=UpperCAmelCase__ )
lowercase_ : int = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
_lowercase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
_lowercase : Any = parser.parse_args()
_lowercase : Dict = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 30
|
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowercase : List[Any] = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : int = [
"FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FocalNetForImageClassification",
"FocalNetForMaskedImageModeling",
"FocalNetBackbone",
"FocalNetModel",
"FocalNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
_lowercase : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 30
| 1
|
'''simple docstring'''
import math
def lowerCamelCase ( UpperCAmelCase__ : int ) -> bool:
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or not number % 2:
# Negatives, 0, 1 and all even numbers are not primes
return False
lowercase_ : Optional[Any] = range(3 , int(math.sqrt(UpperCAmelCase__ ) + 1 ) , 2 )
return not any(not number % i for i in odd_numbers )
def lowerCamelCase ( UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[Any]=1 , **UpperCAmelCase__ : Optional[Any] ) -> List[Any]:
lowercase_ : Optional[Any] = factor * value
lowercase_ : Dict = value
while not is_prime(UpperCAmelCase__ ):
value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
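# if the starting value was already prime, the while-loop never moved it, so recurse
# from value + 1 to return a strictly larger prime rather than the input itself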
if value == first_value_val:
return next_prime(value + 1 , **UpperCAmelCase__ )
return value
| 30
|
'''simple docstring'''
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize("""dataset_size""" , [None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize("""input_in_memory_max_size""" , ["""default""", 0, 100 * 2**20, 900 * 2**20] )
def lowerCamelCase ( UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str ) -> Any:
if input_in_memory_max_size != "default":
monkeypatch.setattr(datasets.config , """IN_MEMORY_MAX_SIZE""" , UpperCAmelCase__ )
lowercase_ : List[Any] = datasets.config.IN_MEMORY_MAX_SIZE
if input_in_memory_max_size == "default":
assert in_memory_max_size == 0
else:
assert in_memory_max_size == input_in_memory_max_size
if dataset_size and in_memory_max_size:
lowercase_ : str = dataset_size < in_memory_max_size
else:
lowercase_ : List[Any] = False
lowercase_ : Any = is_small_dataset(UpperCAmelCase__ )
assert result == expected
| 30
| 1
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
_lowercase : Optional[Any] = logging.get_logger(__name__)
class __magic_name__ ( _UpperCAmelCase):
def __init__( self : Any , *lowercase_ : Union[str, Any] , **lowercase_ : List[Any] ):
warnings.warn(
"""The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use FlavaImageProcessor instead.""" , lowercase_ , )
super().__init__(*lowercase_ , **lowercase_ )
| 30
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class __magic_name__ ( unittest.TestCase):
@slow
def SCREAMING_SNAKE_CASE_ ( self : str ):
lowercase_ : Any = TFAutoModelForSeqaSeqLM.from_pretrained("""google/mt5-small""" )
lowercase_ : Dict = AutoTokenizer.from_pretrained("""google/mt5-small""" )
lowercase_ : Union[str, Any] = tokenizer("""Hello there""" , return_tensors="""tf""" ).input_ids
lowercase_ : List[str] = tokenizer("""Hi I am""" , return_tensors="""tf""" ).input_ids
lowercase_ : Optional[Any] = model(lowercase_ , labels=lowercase_ ).loss
lowercase_ : Optional[int] = -tf.math.reduce_mean(lowercase_ ).numpy()
lowercase_ : Optional[int] = -21.22_81_68
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2E-4 )
| 30
| 1
|
'''simple docstring'''
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
_lowercase : str = {
"cola": 2,
"mnli": 3,
"mrpc": 2,
"sst-2": 2,
"sts-b": 1,
"qqp": 2,
"qnli": 2,
"rte": 2,
"wnli": 2,
}
logging.set_verbosity_info()
def lowerCamelCase ( UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Dict=None ) -> List[str]:
# Initialise PyTorch model
lowercase_ : Optional[Any] = XLNetConfig.from_json_file(UpperCAmelCase__ )
lowercase_ : int = finetuning_task.lower() if finetuning_task is not None else """"""
if finetuning_task in GLUE_TASKS_NUM_LABELS:
print(F'''Building PyTorch XLNetForSequenceClassification model from configuration: {config}''' )
lowercase_ : int = finetuning_task
lowercase_ : List[str] = GLUE_TASKS_NUM_LABELS[finetuning_task]
lowercase_ : Any = XLNetForSequenceClassification(UpperCAmelCase__ )
elif "squad" in finetuning_task:
lowercase_ : Union[str, Any] = finetuning_task
lowercase_ : str = XLNetForQuestionAnswering(UpperCAmelCase__ )
else:
lowercase_ : Union[str, Any] = XLNetLMHeadModel(UpperCAmelCase__ )
# Load weights from tf checkpoint
load_tf_weights_in_xlnet(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
# Save pytorch-model
lowercase_ : str = os.path.join(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase_ : List[Any] = os.path.join(UpperCAmelCase__ , UpperCAmelCase__ )
print(F'''Save PyTorch model to {os.path.abspath(UpperCAmelCase__ )}''' )
torch.save(model.state_dict() , UpperCAmelCase__ )
print(F'''Save configuration file to {os.path.abspath(UpperCAmelCase__ )}''' )
with open(UpperCAmelCase__ , """w""" , encoding="""utf-8""" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
_lowercase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--xlnet_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained XLNet model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--finetuning_task",
default=None,
type=str,
help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
)
_lowercase : Tuple = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
| 30
|
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def lowerCamelCase ( UpperCAmelCase__ : Callable , UpperCAmelCase__ : float , UpperCAmelCase__ : float , UpperCAmelCase__ : float , UpperCAmelCase__ : float ) -> np.array:
lowercase_ : Any = int(np.ceil((x_end - xa) / step_size ) )
lowercase_ : List[Any] = np.zeros((n + 1,) )
lowercase_ : List[Any] = ya
lowercase_ : List[str] = xa
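# The loop below applies Heun's (improved Euler) predictor-corrector step:
#   y_pred  = y_k + h * f(x_k, y_k)
#   y_{k+1} = y_k + (h / 2) * (f(x_k, y_k) + f(x_k + h, y_pred))
# which is second-order accurate, unlike the first-order explicit Euler update.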
for k in range(UpperCAmelCase__ ):
lowercase_ : Optional[Any] = y[k] + step_size * ode_func(UpperCAmelCase__ , y[k] )
lowercase_ : List[Any] = y[k] + (
(step_size / 2) * (ode_func(UpperCAmelCase__ , y[k] ) + ode_func(x + step_size , UpperCAmelCase__ ))
)
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
| 30
| 1
|
'''simple docstring'''
from __future__ import annotations
_lowercase : str = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
_lowercase : List[Any] = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def lowerCamelCase ( UpperCAmelCase__ : list[float] ) -> list[float]:
lowercase_ : Dict = []
lowercase_ : str = len(UpperCAmelCase__ )
for i in range(UpperCAmelCase__ ):
lowercase_ : float = -1
for j in range(i + 1 , UpperCAmelCase__ ):
if arr[i] < arr[j]:
lowercase_ : int = arr[j]
break
result.append(UpperCAmelCase__ )
return result
def lowerCamelCase ( UpperCAmelCase__ : list[float] ) -> list[float]:
lowercase_ : str = []
for i, outer in enumerate(UpperCAmelCase__ ):
lowercase_ : float = -1
for inner in arr[i + 1 :]:
if outer < inner:
lowercase_ : Any = inner
break
result.append(UpperCAmelCase__ )
return result
def lowerCamelCase ( UpperCAmelCase__ : list[float] ) -> list[float]:
lowercase_ : str = len(UpperCAmelCase__ )
lowercase_ : list[float] = []
lowercase_ : list[float] = [-1] * arr_size
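# Monotonic-stack pass: walk the array from right to left, popping stack entries that are
# <= the current element; whatever is left on top is the next greater element. Each value
# is pushed and popped at most once, so this runs in O(n) versus the O(n^2) loops above.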
for index in reversed(range(UpperCAmelCase__ ) ):
if stack:
while stack[-1] <= arr[index]:
stack.pop()
if not stack:
break
if stack:
lowercase_ : Union[str, Any] = stack[-1]
stack.append(arr[index] )
return result
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
_lowercase : List[Any] = (
"from __main__ import arr, next_greatest_element_slow, "
"next_greatest_element_fast, next_greatest_element"
)
print(
"next_greatest_element_slow():",
timeit("next_greatest_element_slow(arr)", setup=setup),
)
print(
"next_greatest_element_fast():",
timeit("next_greatest_element_fast(arr)", setup=setup),
)
print(
" next_greatest_element():",
timeit("next_greatest_element(arr)", setup=setup),
)
| 30
|
'''simple docstring'''
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float = 1 / sqrt(2 ) ) -> IIRFilter:
lowercase_ : str = tau * frequency / samplerate
lowercase_ : Tuple = sin(UpperCAmelCase__ )
lowercase_ : int = cos(UpperCAmelCase__ )
lowercase_ : Any = _sin / (2 * q_factor)
lowercase_ : Dict = (1 - _cos) / 2
lowercase_ : Optional[int] = 1 - _cos
lowercase_ : Dict = 1 + alpha
lowercase_ : List[Any] = -2 * _cos
lowercase_ : Union[str, Any] = 1 - alpha
lowercase_ : List[Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float = 1 / sqrt(2 ) ) -> IIRFilter:
lowercase_ : str = tau * frequency / samplerate
lowercase_ : Optional[int] = sin(UpperCAmelCase__ )
lowercase_ : Dict = cos(UpperCAmelCase__ )
lowercase_ : Optional[int] = _sin / (2 * q_factor)
lowercase_ : Dict = (1 + _cos) / 2
lowercase_ : str = -1 - _cos
lowercase_ : Dict = 1 + alpha
lowercase_ : Optional[Any] = -2 * _cos
lowercase_ : List[Any] = 1 - alpha
lowercase_ : Union[str, Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float = 1 / sqrt(2 ) ) -> IIRFilter:
lowercase_ : int = tau * frequency / samplerate
lowercase_ : int = sin(UpperCAmelCase__ )
lowercase_ : Union[str, Any] = cos(UpperCAmelCase__ )
lowercase_ : str = _sin / (2 * q_factor)
lowercase_ : str = _sin / 2
lowercase_ : Any = 0
lowercase_ : Optional[Any] = -ba
lowercase_ : Dict = 1 + alpha
lowercase_ : Union[str, Any] = -2 * _cos
lowercase_ : Union[str, Any] = 1 - alpha
lowercase_ : Tuple = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float = 1 / sqrt(2 ) ) -> IIRFilter:
lowercase_ : List[str] = tau * frequency / samplerate
lowercase_ : Any = sin(UpperCAmelCase__ )
lowercase_ : List[Any] = cos(UpperCAmelCase__ )
lowercase_ : Optional[Any] = _sin / (2 * q_factor)
lowercase_ : Any = 1 - alpha
lowercase_ : Optional[Any] = -2 * _cos
lowercase_ : Optional[int] = 1 + alpha
lowercase_ : Dict = IIRFilter(2 )
filt.set_coefficients([ba, ba, ba] , [ba, ba, ba] )
return filt
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float , UpperCAmelCase__ : float = 1 / sqrt(2 ) , ) -> IIRFilter:
lowercase_ : Dict = tau * frequency / samplerate
lowercase_ : Tuple = sin(UpperCAmelCase__ )
lowercase_ : List[Any] = cos(UpperCAmelCase__ )
lowercase_ : List[Any] = _sin / (2 * q_factor)
lowercase_ : Any = 10 ** (gain_db / 40)
lowercase_ : List[str] = 1 + alpha * big_a
lowercase_ : List[Any] = -2 * _cos
lowercase_ : Dict = 1 - alpha * big_a
lowercase_ : str = 1 + alpha / big_a
lowercase_ : List[str] = -2 * _cos
lowercase_ : Tuple = 1 - alpha / big_a
lowercase_ : Any = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float , UpperCAmelCase__ : float = 1 / sqrt(2 ) , ) -> IIRFilter:
lowercase_ : Dict = tau * frequency / samplerate
lowercase_ : Union[str, Any] = sin(UpperCAmelCase__ )
lowercase_ : Any = cos(UpperCAmelCase__ )
lowercase_ : Any = _sin / (2 * q_factor)
lowercase_ : Any = 10 ** (gain_db / 40)
lowercase_ : Any = (big_a + 1) - (big_a - 1) * _cos
lowercase_ : int = (big_a + 1) + (big_a - 1) * _cos
lowercase_ : Tuple = (big_a - 1) - (big_a + 1) * _cos
lowercase_ : Optional[Any] = (big_a - 1) + (big_a + 1) * _cos
lowercase_ : int = 2 * sqrt(UpperCAmelCase__ ) * alpha
lowercase_ : Tuple = big_a * (pmc + aaa)
lowercase_ : List[str] = 2 * big_a * mpc
lowercase_ : Union[str, Any] = big_a * (pmc - aaa)
lowercase_ : Optional[int] = ppmc + aaa
lowercase_ : Optional[int] = -2 * pmpc
lowercase_ : Any = ppmc - aaa
lowercase_ : Optional[int] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float , UpperCAmelCase__ : float = 1 / sqrt(2 ) , ) -> IIRFilter:
lowercase_ : str = tau * frequency / samplerate
lowercase_ : int = sin(UpperCAmelCase__ )
lowercase_ : int = cos(UpperCAmelCase__ )
lowercase_ : Dict = _sin / (2 * q_factor)
lowercase_ : Union[str, Any] = 10 ** (gain_db / 40)
lowercase_ : Union[str, Any] = (big_a + 1) - (big_a - 1) * _cos
lowercase_ : Optional[int] = (big_a + 1) + (big_a - 1) * _cos
lowercase_ : Any = (big_a - 1) - (big_a + 1) * _cos
lowercase_ : str = (big_a - 1) + (big_a + 1) * _cos
lowercase_ : Optional[int] = 2 * sqrt(UpperCAmelCase__ ) * alpha
lowercase_ : Tuple = big_a * (ppmc + aaa)
lowercase_ : List[Any] = -2 * big_a * pmpc
lowercase_ : Optional[Any] = big_a * (ppmc - aaa)
lowercase_ : Optional[Any] = pmc + aaa
lowercase_ : int = 2 * mpc
lowercase_ : Tuple = pmc - aaa
lowercase_ : Union[str, Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
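# Usage sketch (hedged): each factory above appears to pack the standard Audio EQ Cookbook
# biquad coefficients into a second-order IIRFilter. Assuming the first factory is the
# low-pass variant (e.g. named `make_lowpass` upstream) and that IIRFilter exposes a
# per-sample `process` method -- both names are assumptions, not visible in this dump:
# filt = make_lowpass(frequency=1_000, samplerate=48_000)
# out_sample = filt.process(0.5)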
| 30
| 1
|
'''simple docstring'''
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
_lowercase : Dict = logging.get_logger(__name__)
set_seed(770)
_lowercase : List[Any] = {
"c_attn": "att_proj",
"c_proj": "out_proj",
"c_fc": "in_proj",
"transformer.": "",
"h.": "layers.",
"ln_1": "layernorm_1",
"ln_2": "layernorm_2",
"ln_f": "layernorm_final",
"wpe": "position_embeds_layer",
"wte": "input_embeds_layer",
}
_lowercase : Dict = {
"text_small": {
"repo_id": "suno/bark",
"file_name": "text.pt",
},
"coarse_small": {
"repo_id": "suno/bark",
"file_name": "coarse.pt",
},
"fine_small": {
"repo_id": "suno/bark",
"file_name": "fine.pt",
},
"text": {
"repo_id": "suno/bark",
"file_name": "text_2.pt",
},
"coarse": {
"repo_id": "suno/bark",
"file_name": "coarse_2.pt",
},
"fine": {
"repo_id": "suno/bark",
"file_name": "fine_2.pt",
},
}
_lowercase : Union[str, Any] = os.path.dirname(os.path.abspath(__file__))
_lowercase : int = os.path.join(os.path.expanduser("~"), ".cache")
_lowercase : List[str] = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0")
def lowerCamelCase ( UpperCAmelCase__ : Any , UpperCAmelCase__ : Any=False ) -> int:
lowercase_ : Tuple = model_type
if use_small:
key += "_small"
return os.path.join(UpperCAmelCase__ , REMOTE_MODEL_PATHS[key]["""file_name"""] )
def lowerCamelCase ( UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str ) -> int:
os.makedirs(UpperCAmelCase__ , exist_ok=UpperCAmelCase__ )
hf_hub_download(repo_id=UpperCAmelCase__ , filename=UpperCAmelCase__ , local_dir=UpperCAmelCase__ )
def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : Any , UpperCAmelCase__ : Union[str, Any]=False , UpperCAmelCase__ : Union[str, Any]="text" ) -> str:
if model_type == "text":
lowercase_ : List[Any] = BarkSemanticModel
lowercase_ : str = BarkSemanticConfig
lowercase_ : Union[str, Any] = BarkSemanticGenerationConfig
elif model_type == "coarse":
lowercase_ : Union[str, Any] = BarkCoarseModel
lowercase_ : List[str] = BarkCoarseConfig
lowercase_ : int = BarkCoarseGenerationConfig
elif model_type == "fine":
lowercase_ : Tuple = BarkFineModel
lowercase_ : int = BarkFineConfig
lowercase_ : Optional[int] = BarkFineGenerationConfig
else:
raise NotImplementedError()
lowercase_ : List[str] = F'''{model_type}_small''' if use_small else model_type
lowercase_ : Optional[int] = REMOTE_MODEL_PATHS[model_key]
if not os.path.exists(UpperCAmelCase__ ):
logger.info(F'''{model_type} model not found, downloading into `{CACHE_DIR}`.''' )
_download(model_info["""repo_id"""] , model_info["""file_name"""] )
lowercase_ : int = torch.load(UpperCAmelCase__ , map_location=UpperCAmelCase__ )
# this is a hack
lowercase_ : int = checkpoint["""model_args"""]
if "input_vocab_size" not in model_args:
lowercase_ : Optional[Any] = model_args["""vocab_size"""]
lowercase_ : Dict = model_args["""vocab_size"""]
del model_args["vocab_size"]
# convert Bark model arguments to HF Bark model arguments
lowercase_ : Dict = model_args.pop("""n_head""" )
lowercase_ : Optional[int] = model_args.pop("""n_embd""" )
lowercase_ : Optional[Any] = model_args.pop("""n_layer""" )
lowercase_ : Union[str, Any] = ConfigClass(**checkpoint["""model_args"""] )
lowercase_ : List[str] = ModelClass(config=UpperCAmelCase__ )
lowercase_ : int = GenerationConfigClass()
lowercase_ : List[Any] = model_generation_config
lowercase_ : str = checkpoint["""model"""]
# fixup checkpoint
lowercase_ : Any = """_orig_mod."""
for k, v in list(state_dict.items() ):
if k.startswith(UpperCAmelCase__ ):
# replace part of the key with corresponding layer name in HF implementation
lowercase_ : str = k[len(UpperCAmelCase__ ) :]
for old_layer_name in new_layer_name_dict:
lowercase_ : str = new_k.replace(UpperCAmelCase__ , new_layer_name_dict[old_layer_name] )
lowercase_ : Dict = state_dict.pop(UpperCAmelCase__ )
lowercase_ : str = set(state_dict.keys() ) - set(model.state_dict().keys() )
lowercase_ : str = {k for k in extra_keys if not k.endswith(""".attn.bias""" )}
lowercase_ : List[Any] = set(model.state_dict().keys() ) - set(state_dict.keys() )
lowercase_ : List[str] = {k for k in missing_keys if not k.endswith(""".attn.bias""" )}
if len(UpperCAmelCase__ ) != 0:
raise ValueError(F'''extra keys found: {extra_keys}''' )
if len(UpperCAmelCase__ ) != 0:
raise ValueError(F'''missing keys: {missing_keys}''' )
model.load_state_dict(UpperCAmelCase__ , strict=UpperCAmelCase__ )
lowercase_ : Dict = model.num_parameters(exclude_embeddings=UpperCAmelCase__ )
lowercase_ : Union[str, Any] = checkpoint["""best_val_loss"""].item()
logger.info(F'''model loaded: {round(n_params/1e6 , 1 )}M params, {round(UpperCAmelCase__ , 3 )} loss''' )
model.eval()
model.to(UpperCAmelCase__ )
del checkpoint, state_dict
return model
def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[int]=False , UpperCAmelCase__ : Dict="text" ) -> str:
if model_type not in ("text", "coarse", "fine"):
raise NotImplementedError()
lowercase_ : str = """cpu""" # do conversion on cpu
lowercase_ : List[str] = _get_ckpt_path(UpperCAmelCase__ , use_small=UpperCAmelCase__ )
lowercase_ : Union[str, Any] = _load_model(UpperCAmelCase__ , UpperCAmelCase__ , model_type=UpperCAmelCase__ , use_small=UpperCAmelCase__ )
# load bark initial model
lowercase_ : Union[str, Any] = _bark_load_model(UpperCAmelCase__ , """cpu""" , model_type=UpperCAmelCase__ , use_small=UpperCAmelCase__ )
if model_type == "text":
lowercase_ : List[Any] = bark_model["""model"""]
if model.num_parameters(exclude_embeddings=UpperCAmelCase__ ) != bark_model.get_num_params():
raise ValueError("""initial and new models don't have the same number of parameters""" )
# check if same output as the bark model
lowercase_ : str = 5
lowercase_ : Any = 10
if model_type in ["text", "coarse"]:
lowercase_ : List[Any] = torch.randint(256 , (batch_size, sequence_length) , dtype=torch.int )
lowercase_ : Tuple = bark_model(UpperCAmelCase__ )[0]
lowercase_ : List[Any] = model(UpperCAmelCase__ )
# take last logits
lowercase_ : Dict = output_new_model_total.logits[:, [-1], :]
else:
lowercase_ : Dict = 3
lowercase_ : Dict = 8
lowercase_ : Optional[Any] = torch.randint(256 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
lowercase_ : int = model(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase_ : Tuple = bark_model(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase_ : Optional[int] = output_new_model_total.logits
# any output difference should come only from the difference in self-attention implementation design
if output_new_model.shape != output_old_model.shape:
raise ValueError("""initial and new outputs don't have the same shape""" )
if (output_new_model - output_old_model).abs().max().item() > 1e-3:
raise ValueError("""initial and new outputs are not equal""" )
Path(UpperCAmelCase__ ).mkdir(exist_ok=UpperCAmelCase__ )
model.save_pretrained(UpperCAmelCase__ )
def lowerCamelCase ( UpperCAmelCase__ : Any , UpperCAmelCase__ : str , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Tuple , ) -> List[str]:
lowercase_ : Dict = os.path.join(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase_ : str = BarkSemanticConfig.from_pretrained(os.path.join(UpperCAmelCase__ , """config.json""" ) )
lowercase_ : Optional[Any] = BarkCoarseConfig.from_pretrained(os.path.join(UpperCAmelCase__ , """config.json""" ) )
lowercase_ : List[Any] = BarkFineConfig.from_pretrained(os.path.join(UpperCAmelCase__ , """config.json""" ) )
lowercase_ : Any = EncodecConfig.from_pretrained("""facebook/encodec_24khz""" )
lowercase_ : List[str] = BarkSemanticModel.from_pretrained(UpperCAmelCase__ )
lowercase_ : str = BarkCoarseModel.from_pretrained(UpperCAmelCase__ )
lowercase_ : List[Any] = BarkFineModel.from_pretrained(UpperCAmelCase__ )
lowercase_ : Tuple = EncodecModel.from_pretrained("""facebook/encodec_24khz""" )
lowercase_ : Union[str, Any] = BarkConfig.from_sub_model_configs(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
lowercase_ : List[Any] = BarkGenerationConfig.from_sub_model_configs(
semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
lowercase_ : Union[str, Any] = BarkModel(UpperCAmelCase__ )
lowercase_ : str = semantic
lowercase_ : Optional[int] = coarseAcoustic
lowercase_ : Tuple = fineAcoustic
lowercase_ : Union[str, Any] = codec
lowercase_ : Tuple = bark_generation_config
Path(UpperCAmelCase__ ).mkdir(exist_ok=UpperCAmelCase__ )
bark.save_pretrained(UpperCAmelCase__ , repo_id=UpperCAmelCase__ , push_to_hub=UpperCAmelCase__ )
if __name__ == "__main__":
_lowercase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument("model_type", type=str, help="text, coarse or fine.")
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--is_small", action="store_true", help="convert the small version instead of the large.")
_lowercase : int = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
| 30
|
'''simple docstring'''
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
_lowercase : str = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class __magic_name__ ( datasets.BuilderConfig):
UpperCamelCase__ = None
def lowerCamelCase ( UpperCAmelCase__ : "pyspark.sql.DataFrame" , UpperCAmelCase__ : List[int] , ) -> str:
import pyspark
def generate_fn():
lowercase_ : List[str] = df.select("""*""" , pyspark.sql.functions.spark_partition_id().alias("""part_id""" ) )
for partition_id in partition_order:
lowercase_ : int = df_with_partition_id.select("""*""" ).where(F'''part_id = {partition_id}''' ).drop("""part_id""" )
lowercase_ : Any = partition_df.collect()
lowercase_ : Dict = 0
for row in rows:
yield F'''{partition_id}_{row_id}''', row.asDict()
row_id += 1
return generate_fn
class __magic_name__ ( _BaseExamplesIterable):
def __init__( self : int , lowercase_ : "pyspark.sql.DataFrame" , lowercase_ : Optional[int]=None , ):
lowercase_ : Dict = df
lowercase_ : Optional[Any] = partition_order or range(self.df.rdd.getNumPartitions() )
lowercase_ : Optional[Any] = _generate_iterable_examples(self.df , self.partition_order )
def __iter__( self : List[Any] ):
yield from self.generate_examples_fn()
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : np.random.Generator ):
lowercase_ : str = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(lowercase_ )
return SparkExamplesIterable(self.df , partition_order=lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : int , lowercase_ : int ):
lowercase_ : str = self.split_shard_indices_by_worker(lowercase_ , lowercase_ )
return SparkExamplesIterable(self.df , partition_order=lowercase_ )
@property
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
return len(self.partition_order )
class __magic_name__ ( datasets.DatasetBuilder):
UpperCamelCase__ = SparkConfig
def __init__( self : Tuple , lowercase_ : "pyspark.sql.DataFrame" , lowercase_ : str = None , lowercase_ : str = None , **lowercase_ : str , ):
import pyspark
lowercase_ : str = pyspark.sql.SparkSession.builder.getOrCreate()
lowercase_ : Optional[int] = df
lowercase_ : List[str] = working_dir
super().__init__(
cache_dir=lowercase_ , config_name=str(self.df.semanticHash() ) , **lowercase_ , )
def SCREAMING_SNAKE_CASE_ ( self : str ):
# Returns the path of the created file.
def create_cache_and_write_probe(lowercase_ : str ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir , exist_ok=lowercase_ )
lowercase_ : List[str] = os.path.join(self._cache_dir , """fs_test""" + uuid.uuida().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(lowercase_ , """a""" )
return [probe_file]
if self._spark.conf.get("""spark.master""" , """""" ).startswith("""local""" ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
lowercase_ : str = (
self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(lowercase_ ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
"""When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir""" )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
return datasets.DatasetInfo(features=self.config.features )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : datasets.download.download_manager.DownloadManager ):
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Union[str, Any] ):
import pyspark
def get_arrow_batch_size(lowercase_ : Any ):
for batch in it:
yield pa.RecordBatch.from_pydict({"""batch_bytes""": [batch.nbytes]} )
lowercase_ : Union[str, Any] = self.df.count()
lowercase_ : Union[str, Any] = df_num_rows if df_num_rows <= 100 else 100
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
lowercase_ : Any = (
self.df.limit(lowercase_ )
.repartition(1 )
.mapInArrow(lowercase_ , """batch_bytes: long""" )
.agg(pyspark.sql.functions.sum("""batch_bytes""" ).alias("""sample_bytes""" ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
lowercase_ : List[Any] = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
lowercase_ : Any = min(lowercase_ , int(approx_total_size / max_shard_size ) )
lowercase_ : Any = self.df.repartition(lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Any , lowercase_ : str , lowercase_ : str , lowercase_ : int , ):
import pyspark
lowercase_ : Any = ParquetWriter if file_format == """parquet""" else ArrowWriter
lowercase_ : Dict = os.path.join(self._working_dir , os.path.basename(lowercase_ ) ) if self._working_dir else fpath
lowercase_ : Optional[Any] = file_format == """parquet"""
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
lowercase_ : Tuple = self.config.features
lowercase_ : Any = self._writer_batch_size
lowercase_ : List[str] = self._fs.storage_options
def write_arrow(lowercase_ : str ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
lowercase_ : List[str] = pyspark.TaskContext().taskAttemptId()
lowercase_ : Dict = next(lowercase_ , lowercase_ )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )
lowercase_ : int = 0
lowercase_ : List[Any] = writer_class(
features=lowercase_ , path=working_fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , writer_batch_size=lowercase_ , storage_options=lowercase_ , embed_local_files=lowercase_ , )
lowercase_ : Optional[Any] = pa.Table.from_batches([first_batch] )
writer.write_table(lowercase_ )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
lowercase_ , lowercase_ : Dict = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )
shard_id += 1
lowercase_ : Any = writer_class(
features=writer._features , path=working_fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , writer_batch_size=lowercase_ , storage_options=lowercase_ , embed_local_files=lowercase_ , )
lowercase_ : List[str] = pa.Table.from_batches([batch] )
writer.write_table(lowercase_ )
if writer._num_bytes > 0:
lowercase_ , lowercase_ : str = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(lowercase_ ) ):
lowercase_ : Optional[Any] = os.path.join(os.path.dirname(lowercase_ ) , os.path.basename(lowercase_ ) )
shutil.move(lowercase_ , lowercase_ )
lowercase_ : Union[str, Any] = (
self.df.mapInArrow(lowercase_ , """task_id: long, num_examples: long, num_bytes: long""" )
.groupBy("""task_id""" )
.agg(
pyspark.sql.functions.sum("""num_examples""" ).alias("""total_num_examples""" ) , pyspark.sql.functions.sum("""num_bytes""" ).alias("""total_num_bytes""" ) , pyspark.sql.functions.count("""num_bytes""" ).alias("""num_shards""" ) , pyspark.sql.functions.collect_list("""num_examples""" ).alias("""shard_lengths""" ) , )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : "datasets.SplitGenerator" , lowercase_ : str = "arrow" , lowercase_ : Optional[Union[str, int]] = None , lowercase_ : Optional[int] = None , **lowercase_ : List[str] , ):
self._validate_cache_dir()
lowercase_ : int = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(lowercase_ )
lowercase_ : Tuple = not is_remote_filesystem(self._fs )
lowercase_ : int = os.path.join if is_local else posixpath.join
lowercase_ : Dict = """-TTTTT-SSSSS-of-NNNNN"""
lowercase_ : Dict = f'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}'''
lowercase_ : Optional[int] = path_join(self._output_dir , lowercase_ )
lowercase_ : Any = 0
lowercase_ : Tuple = 0
lowercase_ : int = 0
lowercase_ : Dict = []
lowercase_ : Union[str, Any] = []
for task_id, content in self._prepare_split_single(lowercase_ , lowercase_ , lowercase_ ):
(
(
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) ,
) : Union[str, Any] = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(lowercase_ )
lowercase_ : List[str] = total_num_examples
lowercase_ : int = total_num_bytes
# should rename everything at the end
logger.debug(f'''Renaming {total_shards} shards.''' )
if total_shards > 1:
lowercase_ : Tuple = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
lowercase_ : Dict = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
lowercase_ : int , lowercase_ : int , lowercase_ : int , ):
rename(
lowercase_ , fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , fpath.replace("""TTTTT-SSSSS""" , f'''{global_shard_id:05d}''' ).replace("""NNNNN""" , f'''{total_shards:05d}''' ) , )
lowercase_ : Union[str, Any] = []
lowercase_ : Tuple = 0
for i in range(len(lowercase_ ) ):
lowercase_ , lowercase_ : List[Any] = task_id_and_num_shards[i]
for shard_id in range(lowercase_ ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(lowercase_ , len(lowercase_ ) ).map(lambda lowercase_ : _rename_shard(*lowercase_ ) ).collect()
else:
# don't use any pattern
lowercase_ : List[str] = 0
lowercase_ : Optional[Any] = task_id_and_num_shards[0][0]
self._rename(
fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , fpath.replace(lowercase_ , """""" ) , )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : "datasets.SplitGenerator" , ):
return SparkExamplesIterable(self.df )
| 30
| 1
|
'''simple docstring'''
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Any ) -> str:
# load base model
lowercase_ : List[str] = StableDiffusionPipeline.from_pretrained(UpperCAmelCase__ , torch_dtype=torch.floataa )
# load LoRA weight from .safetensors
lowercase_ : Optional[int] = load_file(UpperCAmelCase__ )
lowercase_ : str = []
# directly update weight in diffusers model
for key in state_dict:
# it helps to print out the key; it usually looks like the example below
# "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"
# alpha is supplied separately (via the merging ratio), so .alpha entries and already-visited keys are skipped
if ".alpha" in key or key in visited:
continue
if "text" in key:
lowercase_ : List[str] = key.split(""".""" )[0].split(LORA_PREFIX_TEXT_ENCODER + """_""" )[-1].split("""_""" )
lowercase_ : int = pipeline.text_encoder
else:
lowercase_ : str = key.split(""".""" )[0].split(LORA_PREFIX_UNET + """_""" )[-1].split("""_""" )
lowercase_ : Optional[int] = pipeline.unet
# find the target layer
lowercase_ : List[str] = layer_infos.pop(0 )
while len(UpperCAmelCase__ ) > -1:
try:
lowercase_ : Union[str, Any] = curr_layer.__getattr__(UpperCAmelCase__ )
if len(UpperCAmelCase__ ) > 0:
lowercase_ : Tuple = layer_infos.pop(0 )
elif len(UpperCAmelCase__ ) == 0:
break
except Exception:
if len(UpperCAmelCase__ ) > 0:
temp_name += "_" + layer_infos.pop(0 )
else:
lowercase_ : Dict = layer_infos.pop(0 )
lowercase_ : int = []
if "lora_down" in key:
pair_keys.append(key.replace("""lora_down""" , """lora_up""" ) )
pair_keys.append(UpperCAmelCase__ )
else:
pair_keys.append(UpperCAmelCase__ )
pair_keys.append(key.replace("""lora_up""" , """lora_down""" ) )
# update weight
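# Merge the LoRA pair into the frozen weight: W <- W + alpha * (up @ down); conv (4-D) LoRA weights are squeezed to 2-D for the matmul and the result is unsqueezed back.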
if len(state_dict[pair_keys[0]].shape ) == 4:
lowercase_ : str = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
lowercase_ : List[Any] = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
curr_layer.weight.data += alpha * torch.mm(UpperCAmelCase__ , UpperCAmelCase__ ).unsqueeze(2 ).unsqueeze(3 )
else:
lowercase_ : Dict = state_dict[pair_keys[0]].to(torch.floataa )
lowercase_ : int = state_dict[pair_keys[1]].to(torch.floataa )
curr_layer.weight.data += alpha * torch.mm(UpperCAmelCase__ , UpperCAmelCase__ )
# update visited list
for item in pair_keys:
visited.append(UpperCAmelCase__ )
return pipeline
if __name__ == "__main__":
_lowercase : List[Any] = argparse.ArgumentParser()
parser.add_argument(
"--base_model_path", default=None, type=str, required=True, help="Path to the base model in diffusers format."
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--lora_prefix_unet", default="lora_unet", type=str, help="The prefix of UNet weight in safetensors"
)
parser.add_argument(
"--lora_prefix_text_encoder",
default="lora_te",
type=str,
help="The prefix of text encoder weight in safetensors",
)
parser.add_argument("--alpha", default=0.7_5, type=float, help="The merging ratio in W = W0 + alpha * deltaW")
parser.add_argument(
"--to_safetensors", action="store_true", help="Whether to store pipeline in safetensors format or not."
)
parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
_lowercase : List[str] = parser.parse_args()
_lowercase : Tuple = args.base_model_path
_lowercase : Dict = args.checkpoint_path
_lowercase : Union[str, Any] = args.dump_path
_lowercase : Dict = args.lora_prefix_unet
_lowercase : Tuple = args.lora_prefix_text_encoder
_lowercase : List[str] = args.alpha
_lowercase : Dict = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
_lowercase : str = pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 30
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_lowercase : Dict = {
"configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Dict = ["BloomTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Dict = [
"BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
"BloomForCausalLM",
"BloomModel",
"BloomPreTrainedModel",
"BloomForSequenceClassification",
"BloomForTokenClassification",
"BloomForQuestionAnswering",
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
_lowercase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 30
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : Dict = logging.get_logger(__name__)
_lowercase : Tuple = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = '''openai-gpt'''
UpperCamelCase__ = {
'''max_position_embeddings''': '''n_positions''',
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self : Optional[int] , lowercase_ : str=40478 , lowercase_ : Optional[Any]=512 , lowercase_ : str=768 , lowercase_ : List[str]=12 , lowercase_ : Tuple=12 , lowercase_ : str="gelu" , lowercase_ : Union[str, Any]=0.1 , lowercase_ : str=0.1 , lowercase_ : Dict=0.1 , lowercase_ : int=1E-5 , lowercase_ : List[str]=0.02 , lowercase_ : Dict="cls_index" , lowercase_ : List[Any]=True , lowercase_ : int=None , lowercase_ : List[str]=True , lowercase_ : List[Any]=0.1 , **lowercase_ : List[Any] , ):
lowercase_ : Optional[Any] = vocab_size
lowercase_ : List[Any] = n_positions
lowercase_ : List[str] = n_embd
lowercase_ : Dict = n_layer
lowercase_ : List[str] = n_head
lowercase_ : List[Any] = afn
lowercase_ : Any = resid_pdrop
lowercase_ : Tuple = embd_pdrop
lowercase_ : Union[str, Any] = attn_pdrop
lowercase_ : Optional[int] = layer_norm_epsilon
lowercase_ : int = initializer_range
lowercase_ : List[Any] = summary_type
lowercase_ : Any = summary_use_proj
lowercase_ : str = summary_activation
lowercase_ : Tuple = summary_first_dropout
lowercase_ : Tuple = summary_proj_to_labels
super().__init__(**lowercase_ )
| 30
|
'''simple docstring'''
_lowercase : int = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def lowerCamelCase ( ) -> None:
lowercase_ : List[Any] = input("""Enter message: """ )
lowercase_ : str = input("""Enter key [alphanumeric]: """ )
lowercase_ : List[Any] = input("""Encrypt/Decrypt [e/d]: """ )
if mode.lower().startswith("""e""" ):
lowercase_ : List[str] = """encrypt"""
lowercase_ : Optional[int] = encrypt_message(UpperCAmelCase__ , UpperCAmelCase__ )
elif mode.lower().startswith("""d""" ):
lowercase_ : Any = """decrypt"""
lowercase_ : Optional[Any] = decrypt_message(UpperCAmelCase__ , UpperCAmelCase__ )
print(F'''\n{mode.title()}ed message:''' )
print(UpperCAmelCase__ )
def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : str ) -> str:
return translate_message(UpperCAmelCase__ , UpperCAmelCase__ , """encrypt""" )
def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : str ) -> str:
return translate_message(UpperCAmelCase__ , UpperCAmelCase__ , """decrypt""" )
def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : str , UpperCAmelCase__ : str ) -> str:
lowercase_ : Union[str, Any] = []
lowercase_ : List[Any] = 0
lowercase_ : str = key.upper()
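# Shift each letter by the alphabet index of the current key letter (add to encrypt, subtract to decrypt); the key index only advances on alphabetic characters.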
for symbol in message:
lowercase_ : Tuple = LETTERS.find(symbol.upper() )
if num != -1:
if mode == "encrypt":
num += LETTERS.find(key[key_index] )
elif mode == "decrypt":
num -= LETTERS.find(key[key_index] )
num %= len(UpperCAmelCase__ )
if symbol.isupper():
translated.append(LETTERS[num] )
elif symbol.islower():
translated.append(LETTERS[num].lower() )
key_index += 1
if key_index == len(UpperCAmelCase__ ):
lowercase_ : Any = 0
else:
translated.append(UpperCAmelCase__ )
return "".join(UpperCAmelCase__ )
if __name__ == "__main__":
main()
| 30
| 1
|
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class __magic_name__ ( _UpperCAmelCase):
@staticmethod
@abstractmethod
def SCREAMING_SNAKE_CASE_ ( lowercase_ : ArgumentParser ):
raise NotImplementedError()
@abstractmethod
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
raise NotImplementedError()
| 30
|
'''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class __magic_name__ ( unittest.TestCase):
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
lowercase_ : List[Any] = """ylacombe/bark-small"""
lowercase_ : List[str] = tempfile.mkdtemp()
lowercase_ : Tuple = """en_speaker_1"""
lowercase_ : Union[str, Any] = """This is a test string"""
lowercase_ : int = """speaker_embeddings_path.json"""
lowercase_ : Any = """speaker_embeddings"""
def SCREAMING_SNAKE_CASE_ ( self : Tuple , **lowercase_ : Optional[int] ):
return AutoTokenizer.from_pretrained(self.checkpoint , **lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : str ):
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
lowercase_ : Any = self.get_tokenizer()
lowercase_ : Optional[Any] = BarkProcessor(tokenizer=lowercase_ )
processor.save_pretrained(self.tmpdirname )
lowercase_ : Union[str, Any] = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
lowercase_ : Any = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
lowercase_ : List[str] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
lowercase_ : Optional[Any] = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
lowercase_ : Optional[int] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
lowercase_ : Optional[int] = 35
lowercase_ : int = 2
lowercase_ : Union[str, Any] = 8
lowercase_ : Union[str, Any] = {
"""semantic_prompt""": np.ones(lowercase_ ),
"""coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ),
"""fine_prompt""": np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
lowercase_ : str = processor(text=self.input_string , voice_preset=lowercase_ )
lowercase_ : Dict = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowercase_ , np.array([] ) ).tolist() )
# test loading voice preset from npz file
lowercase_ : Any = os.path.join(self.tmpdirname , """file.npz""" )
np.savez(lowercase_ , **lowercase_ )
lowercase_ : Optional[Any] = processor(text=self.input_string , voice_preset=lowercase_ )
lowercase_ : List[Any] = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowercase_ , np.array([] ) ).tolist() )
# test loading voice preset from the hub
lowercase_ : Union[str, Any] = processor(text=self.input_string , voice_preset=self.voice_preset )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
lowercase_ : List[str] = self.get_tokenizer()
lowercase_ : int = BarkProcessor(tokenizer=lowercase_ )
lowercase_ : Any = processor(text=self.input_string )
lowercase_ : List[str] = tokenizer(
self.input_string , padding="""max_length""" , max_length=256 , add_special_tokens=lowercase_ , return_attention_mask=lowercase_ , return_token_type_ids=lowercase_ , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 30
| 1
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
_lowercase : List[str] = logging.get_logger(__name__)
class __magic_name__ ( _UpperCAmelCase):
def __init__( self : int , *lowercase_ : List[Any] , **lowercase_ : Any ):
warnings.warn(
"""The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use CLIPImageProcessor instead.""" , lowercase_ , )
super().__init__(*lowercase_ , **lowercase_ )
| 30
|
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=_UpperCAmelCase)
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = field(default='''image-classification''', metadata={'''include_in_asdict_even_if_is_default''': True})
UpperCamelCase__ = Features({'''image''': Image()})
UpperCamelCase__ = Features({'''labels''': ClassLabel})
UpperCamelCase__ = "image"
UpperCamelCase__ = "labels"
def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : str ):
if self.label_column not in features:
raise ValueError(f'''Column {self.label_column} is not present in features.''' )
if not isinstance(features[self.label_column] , lowercase_ ):
raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' )
lowercase_ : List[str] = copy.deepcopy(self )
lowercase_ : List[str] = self.label_schema.copy()
lowercase_ : List[Any] = features[self.label_column]
lowercase_ : Optional[Any] = label_schema
return task_template
@property
def SCREAMING_SNAKE_CASE_ ( self : int ):
return {
self.image_column: "image",
self.label_column: "labels",
}
| 30
| 1
|
'''simple docstring'''
from __future__ import annotations
from math import pow, sqrt
def lowerCamelCase ( UpperCAmelCase__ : float , UpperCAmelCase__ : float , UpperCAmelCase__ : float ) -> dict[str, float]:
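# Series AC circuit relation: impedance^2 = resistance^2 + reactance^2; solve for whichever of the three quantities is passed as 0.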
if (resistance, reactance, impedance).count(0 ) != 1:
raise ValueError("""One and only one argument must be 0""" )
if resistance == 0:
return {"resistance": sqrt(pow(UpperCAmelCase__ , 2 ) - pow(UpperCAmelCase__ , 2 ) )}
elif reactance == 0:
return {"reactance": sqrt(pow(UpperCAmelCase__ , 2 ) - pow(UpperCAmelCase__ , 2 ) )}
elif impedance == 0:
return {"impedance": sqrt(pow(UpperCAmelCase__ , 2 ) + pow(UpperCAmelCase__ , 2 ) )}
else:
raise ValueError("""Exactly one argument must be 0""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 30
|
'''simple docstring'''
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def lowerCamelCase ( UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[Any] ) -> List[Any]:
lowercase_ : str = 1.5
lowercase_ : List[Any] = int(factor * num_class_images )
lowercase_ : int = ClipClient(
url="""https://knn.laion.ai/knn-service""" , indice_name="""laion_400m""" , num_images=UpperCAmelCase__ , aesthetic_weight=0.1 )
os.makedirs(F'''{class_data_dir}/images''' , exist_ok=UpperCAmelCase__ )
if len(list(Path(F'''{class_data_dir}/images''' ).iterdir() ) ) >= num_class_images:
return
while True:
lowercase_ : List[str] = client.query(text=UpperCAmelCase__ )
if len(UpperCAmelCase__ ) >= factor * num_class_images or num_images > 1e4:
break
else:
lowercase_ : List[str] = int(factor * num_images )
lowercase_ : List[str] = ClipClient(
url="""https://knn.laion.ai/knn-service""" , indice_name="""laion_400m""" , num_images=UpperCAmelCase__ , aesthetic_weight=0.1 , )
lowercase_ : List[str] = 0
lowercase_ : Dict = 0
lowercase_ : Tuple = tqdm(desc="""downloading real regularization images""" , total=UpperCAmelCase__ )
with open(F'''{class_data_dir}/caption.txt''' , """w""" ) as fa, open(F'''{class_data_dir}/urls.txt''' , """w""" ) as fa, open(
F'''{class_data_dir}/images.txt''' , """w""" ) as fa:
while total < num_class_images:
lowercase_ : str = class_images[count]
count += 1
try:
lowercase_ : Union[str, Any] = requests.get(images["""url"""] )
if img.status_code == 200:
lowercase_ : List[str] = Image.open(BytesIO(img.content ) )
with open(F'''{class_data_dir}/images/{total}.jpg''' , """wb""" ) as f:
f.write(img.content )
fa.write(images["""caption"""] + """\n""" )
fa.write(images["""url"""] + """\n""" )
fa.write(F'''{class_data_dir}/images/{total}.jpg''' + """\n""" )
total += 1
pbar.update(1 )
else:
continue
except Exception:
continue
return
def lowerCamelCase ( ) -> Optional[Any]:
lowercase_ : Any = argparse.ArgumentParser("""""" , add_help=UpperCAmelCase__ )
parser.add_argument("""--class_prompt""" , help="""text prompt to retrieve images""" , required=UpperCAmelCase__ , type=UpperCAmelCase__ )
parser.add_argument("""--class_data_dir""" , help="""path to save images""" , required=UpperCAmelCase__ , type=UpperCAmelCase__ )
parser.add_argument("""--num_class_images""" , help="""number of images to download""" , default=200 , type=UpperCAmelCase__ )
return parser.parse_args()
if __name__ == "__main__":
_lowercase : Dict = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 30
| 1
|
'''simple docstring'''
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def lowerCamelCase ( UpperCAmelCase__ : str ) -> str:
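# A word's "signature" is its letters in sorted order; two words are anagrams iff their signatures match.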
return "".join(sorted(UpperCAmelCase__ ) )
def lowerCamelCase ( UpperCAmelCase__ : str ) -> list[str]:
return word_by_signature[signature(UpperCAmelCase__ )]
_lowercase : str = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
_lowercase : Optional[Any] = sorted({word.strip().lower() for word in data.splitlines()})
_lowercase : Any = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
_lowercase : int = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open("anagrams.txt", "w") as file:
file.write("all_anagrams = \n ")
file.write(pprint.pformat(all_anagrams))
| 30
|
'''simple docstring'''
from __future__ import annotations
def lowerCamelCase ( UpperCAmelCase__ : list , UpperCAmelCase__ : int | None = None , UpperCAmelCase__ : int | None = None ) -> None:
if start is None:
lowercase_ : Any = 0
if end is None:
lowercase_ : List[Any] = len(UpperCAmelCase__ ) - 1
if start >= end:
return
lowercase_ : Optional[int] = (start + end) // 2
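# Slowsort ("multiply and surrender"): recursively sort both halves, move the maximum of the two halves to the end, then recursively sort everything except that last element.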
slowsort(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
slowsort(UpperCAmelCase__ , mid + 1 , UpperCAmelCase__ )
if sequence[end] < sequence[mid]:
lowercase_ , lowercase_ : Dict = sequence[mid], sequence[end]
slowsort(UpperCAmelCase__ , UpperCAmelCase__ , end - 1 )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 30
| 1
|
'''simple docstring'''
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
_lowercase : Optional[Any] = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
_lowercase : Dict = parser.parse_args()
_lowercase : Dict = "cpu"
_lowercase : str = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"
_lowercase : Any = "path-to-your-trained-model"
_lowercase : str = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
_lowercase : Optional[int] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
_lowercase : Any = pipe.to(device)
# to channels last
_lowercase : Union[str, Any] = pipe.unet.to(memory_format=torch.channels_last)
_lowercase : List[Any] = pipe.vae.to(memory_format=torch.channels_last)
_lowercase : Union[str, Any] = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
_lowercase : Tuple = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
_lowercase : int = torch.randn(2, 4, 64, 64)
_lowercase : int = torch.rand(1) * 999
_lowercase : Union[str, Any] = torch.randn(2, 77, 768)
_lowercase : Optional[Any] = (sample, timestep, encoder_hidden_status)
try:
_lowercase : int = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
_lowercase : Optional[int] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
_lowercase : List[Any] = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
_lowercase : str = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
_lowercase : int = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)
# compute
_lowercase : int = 666
_lowercase : Any = torch.Generator(device).manual_seed(seed)
_lowercase : int = {"generator": generator}
if args.steps is not None:
_lowercase : Optional[int] = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
_lowercase : List[Any] = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save("generated.png")
| 30
| 1
|
'''simple docstring'''
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : bool = False ) -> bool:
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3317044064679887385961981 and not allow_probable:
raise ValueError(
"""Warning: upper bound of deterministic test is exceeded. """
"""Pass allow_probable=True to allow probabilistic test. """
"""A return value of True indicates a probable prime.""" )
# array bounds provided by analysis
lowercase_ : List[str] = [
2047,
1373653,
25326001,
3215031751,
2152302898747,
3474749660383,
341550071728321,
1,
3825123056546413051,
1,
1,
318665857834031151167461,
3317044064679887385961981,
]
lowercase_ : str = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
for idx, _p in enumerate(UpperCAmelCase__ , 1 ):
if n < _p:
# then we have our last prime to check
lowercase_ : Any = primes[:idx]
break
lowercase_ , lowercase_ : str = n - 1, 0
# break up n -1 into a power of 2 (s) and
# remaining odd component
# essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
for prime in plist:
lowercase_ : Optional[int] = False
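# Witness loop: n passes for this base if prime^d ≡ 1 (mod n) or prime^(d·2^r) ≡ -1 (mod n) for some r < s; otherwise n is composite.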
for r in range(UpperCAmelCase__ ):
lowercase_ : Any = pow(UpperCAmelCase__ , d * 2**r , UpperCAmelCase__ )
# see article for analysis explanation for m
if (r == 0 and m == 1) or ((m + 1) % n == 0):
lowercase_ : List[Any] = True
# this loop will not determine compositeness
break
if pr:
continue
# if pr is False, then the above loop never evaluated to true,
# and the n MUST be composite
return False
return True
def lowerCamelCase ( ) -> None:
assert not miller_rabin(561 )
assert miller_rabin(563 )
# 2047
assert not miller_rabin(838201 )
assert miller_rabin(838207 )
# 1_373_653
assert not miller_rabin(17316001 )
assert miller_rabin(17316017 )
# 25_326_001
assert not miller_rabin(3078386641 )
assert miller_rabin(3078386653 )
# 3_215_031_751
assert not miller_rabin(1713045574801 )
assert miller_rabin(1713045574819 )
# 2_152_302_898_747
assert not miller_rabin(2779799728307 )
assert miller_rabin(2779799728327 )
# 3_474_749_660_383
assert not miller_rabin(113850023909441 )
assert miller_rabin(113850023909527 )
# 341_550_071_728_321
assert not miller_rabin(1275041018848804351 )
assert miller_rabin(1275041018848804391 )
# 3_825_123_056_546_413_051
assert not miller_rabin(79666464458507787791867 )
assert miller_rabin(79666464458507787791951 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(552840677446647897660333 )
assert miller_rabin(552840677446647897660359 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
| 30
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_lowercase : Optional[Any] = {
"configuration_swiftformer": [
"SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SwiftFormerConfig",
"SwiftFormerOnnxConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Any = [
"SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"SwiftFormerForImageClassification",
"SwiftFormerModel",
"SwiftFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
_lowercase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 30
| 1
|
'''simple docstring'''
from torch import nn
class __magic_name__ ( nn.Module):
def __init__( self : List[Any] , lowercase_ : Tuple , lowercase_ : Optional[int] ):
super().__init__()
lowercase_ : Optional[Any] = class_size
lowercase_ : str = embed_size
# self.mlp1 = nn.Linear(embed_size, embed_size)
# self.mlp2 = (nn.Linear(embed_size, class_size))
lowercase_ : Union[str, Any] = nn.Linear(lowercase_ , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Any , lowercase_ : List[str] ):
# hidden_state = nn.functional.relu(self.mlp1(hidden_state))
# hidden_state = self.mlp2(hidden_state)
lowercase_ : Dict = self.mlp(lowercase_ )
return logits
| 30
|
'''simple docstring'''
import unittest
import numpy as np
def lowerCamelCase ( UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : np.ndarray | None = None , ) -> np.ndarray:
lowercase_ : List[Any] = np.shape(UpperCAmelCase__ )
lowercase_ : Dict = np.shape(UpperCAmelCase__ )
lowercase_ : int = np.shape(UpperCAmelCase__ )
if shape_a[0] != shape_b[0]:
lowercase_ : Optional[int] = (
"""Expected the same number of rows for A and B. """
F'''Instead found A of size {shape_a} and B of size {shape_b}'''
)
raise ValueError(UpperCAmelCase__ )
if shape_b[1] != shape_c[1]:
lowercase_ : Optional[Any] = (
"""Expected the same number of columns for B and C. """
F'''Instead found B of size {shape_b} and C of size {shape_c}'''
)
raise ValueError(UpperCAmelCase__ )
lowercase_ : Any = pseudo_inv
if a_inv is None:
try:
lowercase_ : List[str] = np.linalg.inv(UpperCAmelCase__ )
except np.linalg.LinAlgError:
raise ValueError(
"""Input matrix A is not invertible. Cannot compute Schur complement.""" )
return mat_c - mat_b.T @ a_inv @ mat_b
class __magic_name__ ( unittest.TestCase):
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
lowercase_ : Tuple = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
lowercase_ : int = np.array([[0, 3], [3, 0], [2, 3]] )
lowercase_ : Dict = np.array([[2, 1], [6, 3]] )
lowercase_ : Union[str, Any] = schur_complement(lowercase_ , lowercase_ , lowercase_ )
lowercase_ : List[Any] = np.block([[a, b], [b.T, c]] )
lowercase_ : Optional[int] = np.linalg.det(lowercase_ )
lowercase_ : int = np.linalg.det(lowercase_ )
lowercase_ : int = np.linalg.det(lowercase_ )
self.assertAlmostEqual(lowercase_ , det_a * det_s )
def SCREAMING_SNAKE_CASE_ ( self : str ):
lowercase_ : int = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
lowercase_ : Optional[Any] = np.array([[0, 3], [3, 0], [2, 3]] )
lowercase_ : Union[str, Any] = np.array([[2, 1], [6, 3]] )
with self.assertRaises(lowercase_ ):
schur_complement(lowercase_ , lowercase_ , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ : List[Any] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
lowercase_ : List[Any] = np.array([[0, 3], [3, 0], [2, 3]] )
lowercase_ : str = np.array([[2, 1, 3], [6, 3, 5]] )
with self.assertRaises(lowercase_ ):
schur_complement(lowercase_ , lowercase_ , lowercase_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
| 30
| 1
|
'''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_lowercase : List[str] = logging.get_logger(__name__)
_lowercase : List[str] = {"vocab_file": "spiece.model"}
_lowercase : List[Any] = {
"vocab_file": {
"AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
}
}
_lowercase : Dict = {
"AI-Sweden/gpt-sw3-126m": 2048,
"AI-Sweden/gpt-sw3-350m": 2048,
"AI-Sweden/gpt-sw3-1.6b": 2048,
"AI-Sweden/gpt-sw3-6.7b": 2048,
"AI-Sweden/gpt-sw3-20b": 2048,
}
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = VOCAB_FILES_NAMES
UpperCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase__ = ['''input_ids''', '''attention_mask''']
def __init__( self : str , lowercase_ : Optional[int] , lowercase_ : Any=False , lowercase_ : Union[str, Any]=False , lowercase_ : Dict=False , lowercase_ : str=None , lowercase_ : Optional[int]=None , lowercase_ : str=None , lowercase_ : Any=None , lowercase_ : Optional[Dict[str, Any]] = None , **lowercase_ : Tuple , ):
lowercase_ : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
lowercase_ : Union[str, Any] = kwargs.get("""name_or_path""" )
if name_or_path is None:
logger.warning(
"""name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"""
""" you are testing the model, this can safely be ignored""" )
lowercase_ : List[Any] = """None"""
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
lowercase_ : Dict = """<|endoftext|>""" if eos_token is None else eos_token
lowercase_ : Optional[Any] = """<unk>""" if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
lowercase_ : Union[str, Any] = unk_token if pad_token is None else pad_token
lowercase_ : int = eos_token if bos_token is None else bos_token
else:
lowercase_ : Optional[Any] = """<pad>""" if pad_token is None else pad_token
lowercase_ : int = """<s>""" if bos_token is None else bos_token
super().__init__(
do_lower_case=lowercase_ , remove_space=lowercase_ , keep_accents=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , unk_token=lowercase_ , pad_token=lowercase_ , sp_model_kwargs=self.sp_model_kwargs , **lowercase_ , )
lowercase_ : Optional[Any] = do_lower_case
lowercase_ : List[Any] = remove_space
lowercase_ : Tuple = keep_accents
lowercase_ : List[Any] = vocab_file
lowercase_ : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowercase_ )
# Used for whitespace normalization in input texts
# fmt: off
lowercase_ : Optional[Any] = {""" """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """""", """"""}
# fmt: on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
lowercase_ : int = re.compile(
f'''[{''.join(map(lowercase_ , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8203] ) )}]''' )
def __getstate__( self : Tuple ):
lowercase_ : int = self.__dict__.copy()
lowercase_ : Union[str, Any] = None
return state
def __setstate__( self : Union[str, Any] , lowercase_ : str ):
lowercase_ : Any = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
lowercase_ : Optional[Any] = {}
lowercase_ : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
return len(self.sp_model )
def SCREAMING_SNAKE_CASE_ ( self : Any , lowercase_ : str ):
lowercase_ : Union[str, Any] = self.non_printing_characters_re.sub("""""" , lowercase_ )
# Normalize whitespaces
lowercase_ : Union[str, Any] = """""".join([char if char not in self.whitespaces else """ """ for char in text] )
# NFC Unicode normalization
lowercase_ : Tuple = unicodedata.normalize("""NFC""" , lowercase_ )
return text
def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : str , **lowercase_ : List[Any] ):
lowercase_ : Tuple = self.preprocess_text(lowercase_ )
return self.sp_model.encode(lowercase_ , out_type=lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : str ):
return self.sp_model.PieceToId(lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : int ):
return self.sp_model.IdToPiece(lowercase_ )
@staticmethod
def SCREAMING_SNAKE_CASE_ ( lowercase_ : str ):
return out_string
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : List[str] ):
lowercase_ : Optional[int] = []
lowercase_ : Optional[Any] = """"""
lowercase_ : Dict = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowercase_ ) + token
lowercase_ : Optional[Any] = True
lowercase_ : Optional[Any] = []
else:
current_sub_tokens.append(lowercase_ )
lowercase_ : List[str] = False
out_string += self.sp_model.decode(lowercase_ )
return out_string
def SCREAMING_SNAKE_CASE_ ( self : str ):
lowercase_ : List[Any] = {self.convert_ids_to_tokens(lowercase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def SCREAMING_SNAKE_CASE_ ( self : Any , lowercase_ : str , lowercase_ : Optional[str] = None ):
if not os.path.isdir(lowercase_ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowercase_ : str = os.path.join(
lowercase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowercase_ )
elif not os.path.isfile(self.vocab_file ):
with open(lowercase_ , """wb""" ) as fi:
lowercase_ : Any = self.sp_model.serialized_model_proto()
fi.write(lowercase_ )
return (out_vocab_file,)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : Union[str, List[str]] , lowercase_ : Union[str, bool] = False ):
if isinstance(lowercase_ , lowercase_ ):
lowercase_ : str = self.preprocess_text(lowercase_ )
lowercase_ : Any = self.sp_model.encode(lowercase_ )
else:
lowercase_ : int = [self.preprocess_text(lowercase_ ) for t in text]
lowercase_ : Dict = self.sp_model.encode(lowercase_ )
if return_tensors is True or return_tensors == "pt":
lowercase_ : List[str] = torch.tensor(lowercase_ )
return token_ids
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : Union[int, List[int]] ):
return self.sp_model.decode(lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : "Conversation" ):
lowercase_ : Optional[Any] = [f'''User: {text}''' if is_user else f'''Bot: {text}''' for is_user, text in conversation.iter_texts()]
lowercase_ : Optional[Any] = (
f'''{self.eos_token}{self.bos_token}''' + f'''{self.bos_token}'''.join(lowercase_ ) + f'''{self.bos_token}Bot:'''
)
return self.encode(text=lowercase_ )
| 30
|
'''simple docstring'''
_lowercase : int = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
def lowerCamelCase ( UpperCAmelCase__ : bytes ) -> bytes:
# Make sure the supplied data is a bytes-like object
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
lowercase_ : Union[str, Any] = F'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
raise TypeError(UpperCAmelCase__ )
lowercase_ : Dict = """""".join(bin(UpperCAmelCase__ )[2:].zfill(8 ) for byte in data )
lowercase_ : Union[str, Any] = len(UpperCAmelCase__ ) % 6 != 0
if padding_needed:
# The padding that will be added later
lowercase_ : List[Any] = b"""=""" * ((6 - len(UpperCAmelCase__ ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(UpperCAmelCase__ ) % 6)
else:
lowercase_ : Union[str, Any] = b""""""
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(UpperCAmelCase__ ) , 6 ) ).encode()
+ padding
)
def lowerCamelCase ( UpperCAmelCase__ : str ) -> bytes:
# Make sure encoded_data is either a string or a bytes-like object
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) and not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
lowercase_ : List[str] = (
"""argument should be a bytes-like object or ASCII string, """
F'''not \'{encoded_data.__class__.__name__}\''''
)
raise TypeError(UpperCAmelCase__ )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
try:
lowercase_ : Optional[int] = encoded_data.decode("""utf-8""" )
except UnicodeDecodeError:
raise ValueError("""base64 encoded data should only contain ASCII characters""" )
lowercase_ : Any = encoded_data.count("""=""" )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(UpperCAmelCase__ ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
lowercase_ : Optional[int] = encoded_data[:-padding]
lowercase_ : Any = """""".join(
bin(B64_CHARSET.index(UpperCAmelCase__ ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
lowercase_ : int = """""".join(
bin(B64_CHARSET.index(UpperCAmelCase__ ) )[2:].zfill(6 ) for char in encoded_data )
lowercase_ : Optional[int] = [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(UpperCAmelCase__ ) , 8 )
]
return bytes(UpperCAmelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 30
| 1
|
'''simple docstring'''
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def lowerCamelCase ( ) -> Any:
lowercase_ : int = argparse.ArgumentParser()
parser.add_argument("""--model_ckpt""" , type=UpperCAmelCase__ , default="""microsoft/unixcoder-base-nine""" )
parser.add_argument("""--num_epochs""" , type=UpperCAmelCase__ , default=5 )
parser.add_argument("""--batch_size""" , type=UpperCAmelCase__ , default=6 )
parser.add_argument("""--gradient_accumulation_steps""" , type=UpperCAmelCase__ , default=1 )
parser.add_argument("""--freeze""" , type=UpperCAmelCase__ , default=UpperCAmelCase__ )
parser.add_argument("""--learning_rate""" , type=UpperCAmelCase__ , default=5e-4 )
parser.add_argument("""--seed""" , type=UpperCAmelCase__ , default=0 )
parser.add_argument("""--lr_scheduler_type""" , type=UpperCAmelCase__ , default="""cosine""" )
parser.add_argument("""--num_warmup_steps""" , type=UpperCAmelCase__ , default=10 )
parser.add_argument("""--weight_decay""" , type=UpperCAmelCase__ , default=0.01 )
parser.add_argument("""--output_dir""" , type=UpperCAmelCase__ , default="""./results""" )
return parser.parse_args()
_lowercase : Any = load("accuracy")
def lowerCamelCase ( UpperCAmelCase__ : Optional[Any] ) -> Tuple:
lowercase_ , lowercase_ : str = eval_pred
lowercase_ : Union[str, Any] = np.argmax(UpperCAmelCase__ , axis=1 )
return metric.compute(predictions=UpperCAmelCase__ , references=UpperCAmelCase__ )
class __magic_name__ ( _UpperCAmelCase):
def __init__( self : Optional[int] , lowercase_ : Tuple ):
super().__init__()
lowercase_ : List[Any] = trainer
def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : Tuple , **lowercase_ : Dict ):
if control.should_evaluate:
lowercase_ : Union[str, Any] = deepcopy(lowercase_ )
self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix="""train""" )
return control_copy
def lowerCamelCase ( ) -> List[str]:
lowercase_ : Union[str, Any] = get_args()
set_seed(args.seed )
lowercase_ : Union[str, Any] = load_dataset("""codeparrot/codecomplex""" , split="""train""" )
lowercase_ : Any = dataset.train_test_split(test_size=0.2 )
lowercase_ : Any = train_test["""test"""].train_test_split(test_size=0.5 )
lowercase_ : str = DatasetDict(
{
"""train""": train_test["""train"""],
"""test""": test_validation["""train"""],
"""valid""": test_validation["""test"""],
} )
print("""Loading tokenizer and model""" )
lowercase_ : int = AutoTokenizer.from_pretrained(args.model_ckpt )
lowercase_ : Optional[int] = tokenizer.eos_token
lowercase_ : Any = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt , num_labels=7 )
lowercase_ : Optional[Any] = model.config.eos_token_id
if args.freeze:
for param in model.roberta.parameters():
lowercase_ : Tuple = False
lowercase_ : Union[str, Any] = ClassLabel(num_classes=7 , names=list(set(train_test_validation["""train"""]["""complexity"""] ) ) )
def tokenize(UpperCAmelCase__ : Optional[Any] ):
lowercase_ : Any = tokenizer(example["""src"""] , truncation=UpperCAmelCase__ , max_length=1024 )
lowercase_ : int = labels.str2int(example["""complexity"""] )
return {
"input_ids": inputs["input_ids"],
"attention_mask": inputs["attention_mask"],
"label": label,
}
lowercase_ : List[str] = train_test_validation.map(
UpperCAmelCase__ , batched=UpperCAmelCase__ , remove_columns=train_test_validation["""train"""].column_names , )
lowercase_ : int = DataCollatorWithPadding(tokenizer=UpperCAmelCase__ )
lowercase_ : Dict = TrainingArguments(
output_dir=args.output_dir , learning_rate=args.learning_rate , lr_scheduler_type=args.lr_scheduler_type , evaluation_strategy="""epoch""" , save_strategy="""epoch""" , logging_strategy="""epoch""" , per_device_train_batch_size=args.batch_size , per_device_eval_batch_size=args.batch_size , num_train_epochs=args.num_epochs , gradient_accumulation_steps=args.gradient_accumulation_steps , weight_decay=0.01 , metric_for_best_model="""accuracy""" , run_name="""complexity-java""" , report_to="""wandb""" , )
lowercase_ : List[Any] = Trainer(
model=UpperCAmelCase__ , args=UpperCAmelCase__ , train_dataset=tokenized_datasets["""train"""] , eval_dataset=tokenized_datasets["""valid"""] , tokenizer=UpperCAmelCase__ , data_collator=UpperCAmelCase__ , compute_metrics=UpperCAmelCase__ , )
print("""Training...""" )
trainer.add_callback(CustomCallback(UpperCAmelCase__ ) )
trainer.train()
if __name__ == "__main__":
main()
| 30
|
'''simple docstring'''
import argparse
_lowercase : Optional[int] = "docs/source/_static/js/custom.js"
def lowerCamelCase ( UpperCAmelCase__ : Tuple ) -> Dict:
with open(UpperCAmelCase__ , encoding="""utf-8""" , newline="""\n""" ) as f:
lowercase_ : Optional[int] = f.readlines()
lowercase_ : Tuple = 0
# First let's put the right version
while not lines[index].startswith("""const stableVersion =""" ):
index += 1
lowercase_ : Optional[Any] = F'''const stableVersion = "v{version}"\n'''
# Then update the dictionary
while not lines[index].startswith("""const versionMapping = {""" ):
index += 1
# We go until the end
while not lines[index].startswith("""}""" ):
index += 1
# We add the new version at the end
lines[index - 1] += F''' "v{version}": "v{version}",\n'''
with open(UpperCAmelCase__ , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(UpperCAmelCase__ )
if __name__ == "__main__":
_lowercase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("--version", help="Release version.")
_lowercase : Dict = parser.parse_args()
update_custom_js(args.version)
| 30
| 1
|
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def lowerCamelCase ( UpperCAmelCase__ : Any ) -> Optional[Any]:
for param in module.parameters():
lowercase_ : Optional[Any] = False
def lowerCamelCase ( ) -> str:
lowercase_ : Optional[int] = """cuda""" if torch.cuda.is_available() else """cpu"""
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
lowercase_ : Union[str, Any] = """mps"""
if device == "mps":
print(
"""WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"""
""" errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"""
""" with generations.""" )
return device
def lowerCamelCase ( UpperCAmelCase__ : Union[str, Any] ) -> int:
lowercase_ : Union[str, Any] = plt.imshow(UpperCAmelCase__ )
fig.axes.get_xaxis().set_visible(UpperCAmelCase__ )
fig.axes.get_yaxis().set_visible(UpperCAmelCase__ )
plt.show()
def lowerCamelCase ( ) -> str:
lowercase_ : Dict = datetime.now()
lowercase_ : Optional[Any] = current_time.strftime("""%H:%M:%S""" )
return timestamp
| 30
|
'''simple docstring'''
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class __magic_name__ :
def __init__( self : Tuple , lowercase_ : Tuple , lowercase_ : Any=3 , lowercase_ : int=32 , lowercase_ : str=3 , lowercase_ : int=10 , lowercase_ : Any=[8, 16, 32, 64] , lowercase_ : Tuple=[1, 1, 2, 1] , lowercase_ : Any=True , lowercase_ : int=True , lowercase_ : Any="relu" , lowercase_ : List[Any]=3 , lowercase_ : Tuple=None , lowercase_ : Union[str, Any]=["stage2", "stage3", "stage4"] , lowercase_ : Optional[int]=[2, 3, 4] , lowercase_ : List[str]=1 , ):
lowercase_ : Any = parent
lowercase_ : str = batch_size
lowercase_ : Any = image_size
lowercase_ : Optional[Any] = num_channels
lowercase_ : Any = embeddings_size
lowercase_ : Union[str, Any] = hidden_sizes
lowercase_ : Any = depths
lowercase_ : Dict = is_training
lowercase_ : Tuple = use_labels
lowercase_ : str = hidden_act
lowercase_ : Optional[Any] = num_labels
lowercase_ : Tuple = scope
lowercase_ : Any = len(lowercase_ )
lowercase_ : Optional[Any] = out_features
lowercase_ : Tuple = out_indices
lowercase_ : str = num_groups
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
lowercase_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase_ : List[Any] = None
if self.use_labels:
lowercase_ : List[str] = ids_tensor([self.batch_size] , self.num_labels )
lowercase_ : int = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : int , lowercase_ : List[str] , lowercase_ : List[str] ):
lowercase_ : Optional[int] = BitModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ : List[Any] = model(lowercase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : Tuple , lowercase_ : Optional[int] , lowercase_ : List[Any] ):
lowercase_ : Union[str, Any] = self.num_labels
lowercase_ : Tuple = BitForImageClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ : Any = model(lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : List[Any] , lowercase_ : Optional[int] ):
lowercase_ : Any = BitBackbone(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ : Dict = model(lowercase_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
lowercase_ : List[str] = None
lowercase_ : Dict = BitBackbone(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ : Tuple = model(lowercase_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ : Optional[int] = self.prepare_config_and_inputs()
lowercase_ , lowercase_ , lowercase_ : Optional[Any] = config_and_inputs
lowercase_ : Any = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __magic_name__ ( _UpperCAmelCase, _UpperCAmelCase, unittest.TestCase):
UpperCamelCase__ = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
UpperCamelCase__ = (
{'''feature-extraction''': BitModel, '''image-classification''': BitForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
def SCREAMING_SNAKE_CASE_ ( self : str ):
lowercase_ : int = BitModelTester(self )
lowercase_ : Optional[int] = ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
return
@unittest.skip(reason="""Bit does not output attentions""" )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
pass
@unittest.skip(reason="""Bit does not use inputs_embeds""" )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
pass
@unittest.skip(reason="""Bit does not support input and output embeddings""" )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
pass
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
lowercase_ , lowercase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : Optional[Any] = model_class(lowercase_ )
lowercase_ : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ : Union[str, Any] = [*signature.parameters.keys()]
lowercase_ : Optional[int] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
lowercase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ , lowercase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : List[Any] = model_class(config=lowercase_ )
for name, module in model.named_modules():
if isinstance(lowercase_ , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
def check_hidden_states_output(lowercase_ : Optional[int] , lowercase_ : int , lowercase_ : int ):
lowercase_ : Optional[Any] = model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
with torch.no_grad():
lowercase_ : List[Any] = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
lowercase_ : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowercase_ : Optional[int] = self.model_tester.num_stages
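# hidden_states holds the stem/embedding output plus one feature map per stage, hence num_stages + 1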
self.assertEqual(len(lowercase_ ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowercase_ , lowercase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
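# Bit supports two residual layer types ("preactivation" and "bottleneck"); run the hidden-state checks for both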
lowercase_ : Dict = ["""preactivation""", """bottleneck"""]
for model_class in self.all_model_classes:
for layer_type in layers_type:
lowercase_ : Union[str, Any] = layer_type
lowercase_ : Optional[Any] = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase_ : Union[str, Any] = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
@unittest.skip(reason="""Bit does not use feedforward chunking""" )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
pass
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
lowercase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase_ )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ : List[str] = BitModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
def lowerCamelCase ( ) -> Optional[Any]:
lowercase_ : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class __magic_name__ ( unittest.TestCase):
@cached_property
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def SCREAMING_SNAKE_CASE_ ( self : int ):
lowercase_ : List[str] = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(lowercase_ )
lowercase_ : int = self.default_image_processor
lowercase_ : List[Any] = prepare_img()
lowercase_ : Dict = image_processor(images=lowercase_ , return_tensors="""pt""" ).to(lowercase_ )
# forward pass
with torch.no_grad():
lowercase_ : str = model(**lowercase_ )
# verify the logits
lowercase_ : Optional[int] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowercase_ )
lowercase_ : Union[str, Any] = torch.tensor([[-0.65_26, -0.52_63, -1.43_98]] ).to(lowercase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase_ , atol=1E-4 ) )
@require_torch
class __magic_name__ ( _UpperCAmelCase, unittest.TestCase):
UpperCamelCase__ = (BitBackbone,) if is_torch_available() else ()
UpperCamelCase__ = BitConfig
UpperCamelCase__ = False
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
lowercase_ : Union[str, Any] = BitModelTester(self )
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = (
'''This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image. '''
'''It takes two arguments named `image` which should be the original image, and `label` which should be a text '''
'''describing the elements that should be identified in the segmentation mask. The tool returns the mask.'''
)
UpperCamelCase__ = '''CIDAS/clipseg-rd64-refined'''
UpperCamelCase__ = '''image_segmenter'''
UpperCamelCase__ = CLIPSegForImageSegmentation
UpperCamelCase__ = ['''image''', '''text''']
UpperCamelCase__ = ['''image''']
def __init__( self : Any , *lowercase_ : Optional[int] , **lowercase_ : int ):
requires_backends(self , ["""vision"""] )
super().__init__(*lowercase_ , **lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : "Image" , lowercase_ : str ):
return self.pre_processor(text=[label] , images=[image] , padding=lowercase_ , return_tensors="""pt""" )
def SCREAMING_SNAKE_CASE_ ( self : Any , lowercase_ : Dict ):
with torch.no_grad():
lowercase_ : Optional[Any] = self.model(**lowercase_ ).logits
return logits
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : List[str] ):
lowercase_ : str = outputs.cpu().detach().numpy()
lowercase_ : Optional[int] = 0
lowercase_ : Optional[Any] = 1
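# Note: in the upstream implementation the two assignments above are in-place threshold
# operations on the logits array (values <= 0 -> 0, values > 0 -> 1), turning it into a
# binary mask before it is scaled to an 8-bit image below.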
return Image.fromarray((array * 255).astype(np.uinta ) )
'''simple docstring'''
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
_lowercase : Optional[Any] = [
"Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the"
" final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe"
" depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.",
"The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal"
" accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's"
" founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the"
" body.",
"Amnesty International releases its annual report on the death penalty. The report catalogs the use of"
" state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the"
" world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital"
" punishment.",
]
_lowercase : List[Any] = [
"Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ."
" Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz"
" had informed his Lufthansa training school of an episode of severe depression, airline says .",
"Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ."
" Israel and the United States opposed the move, which could open the door to war crimes investigations against"
" Israelis .",
"Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to"
" death . Organization claims that governments around the world are using the threat of terrorism to advance"
" executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death"
" sentences up by 28% .",
]
def lowerCamelCase ( ) -> List[str]:
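# With bootstrap aggregation disabled, calculate_rouge returns per-example scores; the rouge2
# values should not depend on which other rouge keys were requested alongside it.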
lowercase_ : str = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , bootstrap_aggregation=UpperCAmelCase__ , rouge_keys=["""rouge2""", """rougeL"""] )
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase_ : int = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , bootstrap_aggregation=UpperCAmelCase__ , rouge_keys=["""rouge2"""] )
assert (
pd.DataFrame(no_aggregation["""rouge2"""] ).fmeasure.mean()
== pd.DataFrame(no_aggregation_just_ra["""rouge2"""] ).fmeasure.mean()
)
def lowerCamelCase ( ) -> Optional[Any]:
lowercase_ : Tuple = """rougeLsum"""
lowercase_ : Optional[Any] = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=[k] )[k]
lowercase_ : Optional[Any] = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=[k] )[k]
assert score > score_no_sep
def lowerCamelCase ( ) -> List[Any]:
lowercase_ : Optional[int] = ["""rouge1""", """rouge2""", """rougeL"""]
lowercase_ : Tuple = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=UpperCAmelCase__ )
lowercase_ : Tuple = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=UpperCAmelCase__ )
assert score_sep == score_no_sep
def lowerCamelCase ( ) -> Optional[Any]:
lowercase_ : Union[str, Any] = [
"""Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.""",
"""Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .""",
]
lowercase_ : List[str] = [
"""Margot Frank, died in 1945, a month earlier than previously thought.""",
"""Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"""
""" the final seconds on board Flight 9525.""",
]
assert calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ ) == calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ )
def lowerCamelCase ( ) -> Union[str, Any]:
lowercase_ : Optional[Any] = [
"""\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" """
]
lowercase_ : List[Any] = [
""" Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."""
]
lowercase_ : Optional[int] = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , rouge_keys=["""rougeLsum"""] , newline_sep=UpperCAmelCase__ )["""rougeLsum"""]
lowercase_ : List[str] = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , rouge_keys=["""rougeLsum"""] )["""rougeLsum"""]
assert new_score > prev_score
def lowerCamelCase ( ) -> Tuple:
lowercase_ : Optional[int] = Path("""examples/seq2seq/test_data/wmt_en_ro""" )
lowercase_ : List[Any] = calculate_rouge_path(data_dir.joinpath("""test.source""" ) , data_dir.joinpath("""test.target""" ) )
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase_ : Union[str, Any] = calculate_rouge_path(
data_dir.joinpath("""test.source""" ) , data_dir.joinpath("""test.target""" ) , bootstrap_aggregation=UpperCAmelCase__ )
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=_UpperCAmelCase)
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = field(default='''image-classification''', metadata={'''include_in_asdict_even_if_is_default''': True})
UpperCamelCase__ = Features({'''image''': Image()})
UpperCamelCase__ = Features({'''labels''': ClassLabel})
UpperCamelCase__ = "image"
UpperCamelCase__ = "labels"
def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : str ):
if self.label_column not in features:
raise ValueError(f'''Column {self.label_column} is not present in features.''' )
if not isinstance(features[self.label_column] , lowercase_ ):
raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' )
lowercase_ : List[str] = copy.deepcopy(self )
lowercase_ : List[str] = self.label_schema.copy()
lowercase_ : List[Any] = features[self.label_column]
lowercase_ : Optional[Any] = label_schema
return task_template
@property
def SCREAMING_SNAKE_CASE_ ( self : int ):
return {
self.image_column: "image",
self.label_column: "labels",
}
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : Optional[Any] = logging.get_logger(__name__)
_lowercase : Union[str, Any] = {
"facebook/s2t-small-librispeech-asr": (
"https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = '''speech_to_text'''
UpperCamelCase__ = ['''past_key_values''']
UpperCamelCase__ = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self : str , lowercase_ : Optional[int]=10000 , lowercase_ : int=12 , lowercase_ : Any=2048 , lowercase_ : Any=4 , lowercase_ : Dict=6 , lowercase_ : Any=2048 , lowercase_ : List[str]=4 , lowercase_ : str=0.0 , lowercase_ : str=0.0 , lowercase_ : Union[str, Any]=True , lowercase_ : List[Any]=True , lowercase_ : int="relu" , lowercase_ : str=256 , lowercase_ : int=0.1 , lowercase_ : int=0.0 , lowercase_ : str=0.0 , lowercase_ : Optional[int]=0.02 , lowercase_ : str=2 , lowercase_ : Union[str, Any]=True , lowercase_ : Any=1 , lowercase_ : Dict=0 , lowercase_ : List[str]=2 , lowercase_ : List[Any]=6000 , lowercase_ : Tuple=1024 , lowercase_ : str=2 , lowercase_ : Any=(5, 5) , lowercase_ : Union[str, Any]=1024 , lowercase_ : Dict=80 , lowercase_ : List[Any]=1 , **lowercase_ : int , ):
lowercase_ : List[Any] = vocab_size
lowercase_ : str = d_model
lowercase_ : List[Any] = encoder_ffn_dim
lowercase_ : str = encoder_layers
lowercase_ : Dict = encoder_attention_heads
lowercase_ : str = decoder_ffn_dim
lowercase_ : int = decoder_layers
lowercase_ : Any = decoder_attention_heads
lowercase_ : Any = dropout
lowercase_ : Dict = attention_dropout
lowercase_ : Optional[int] = activation_dropout
lowercase_ : Any = activation_function
lowercase_ : Union[str, Any] = init_std
lowercase_ : str = encoder_layerdrop
lowercase_ : Optional[int] = decoder_layerdrop
lowercase_ : Dict = use_cache
lowercase_ : Union[str, Any] = encoder_layers
lowercase_ : Tuple = scale_embedding # scale factor will be sqrt(d_model) if True
lowercase_ : Dict = max_source_positions
lowercase_ : Optional[int] = max_target_positions
lowercase_ : Tuple = num_conv_layers
lowercase_ : Tuple = list(lowercase_ )
lowercase_ : Union[str, Any] = conv_channels
lowercase_ : str = input_feat_per_channel
lowercase_ : str = input_channels
if len(self.conv_kernel_sizes ) != self.num_conv_layers:
raise ValueError(
"""Configuration for convolutional module is incorrect. """
"""It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` """
f'''but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, '''
f'''`config.num_conv_layers = {self.num_conv_layers}`.''' )
super().__init__(
pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , is_encoder_decoder=lowercase_ , decoder_start_token_id=lowercase_ , **lowercase_ , )
'''simple docstring'''
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = ComputeEnvironment.AMAZON_SAGEMAKER
UpperCamelCase__ = True
UpperCamelCase__ = '''ml.p3.2xlarge'''
UpperCamelCase__ = '''accelerate_sagemaker_execution_role'''
UpperCamelCase__ = '''hf-sm'''
UpperCamelCase__ = '''us-east-1'''
UpperCamelCase__ = 1
UpperCamelCase__ = '''accelerate-sagemaker-1'''
UpperCamelCase__ = '''1.6'''
UpperCamelCase__ = '''4.4'''
UpperCamelCase__ = '''train.py'''
UpperCamelCase__ = [
'''--model_name_or_path''',
'''bert''',
'''--do_train''',
'''False''',
'''--epochs''',
'''3''',
'''--learning_rate''',
'''5e-5''',
'''--max_steps''',
'''50.5''',
]
UpperCamelCase__ = [
'''--model_name_or_path''',
'''bert''',
'''--do_train''',
'''--do_test''',
'''False''',
'''--do_predict''',
'''--epochs''',
'''3''',
'''--learning_rate''',
'''5e-5''',
'''--max_steps''',
'''50.5''',
]
class __magic_name__ ( unittest.TestCase):
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
# `_convert_nargs_to_dict` should parse the training script arguments into a dict with properly typed values.
lowercase_ : Tuple = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args )
assert isinstance(converted_args["""model_name_or_path"""] , lowercase_ )
assert isinstance(converted_args["""do_train"""] , lowercase_ )
assert isinstance(converted_args["""epochs"""] , lowercase_ )
assert isinstance(converted_args["""learning_rate"""] , lowercase_ )
assert isinstance(converted_args["""max_steps"""] , lowercase_ )
with pytest.raises(lowercase_ ):
_convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args )
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class __magic_name__ :
def __init__( self : int , lowercase_ : Optional[Any] , lowercase_ : List[Any]=13 , lowercase_ : List[str]=10 , lowercase_ : Union[str, Any]=3 , lowercase_ : str=2 , lowercase_ : Optional[Any]=2 , lowercase_ : int=True , lowercase_ : List[Any]=True , lowercase_ : Union[str, Any]=32 , lowercase_ : Union[str, Any]=5 , lowercase_ : str=4 , lowercase_ : Dict=37 , lowercase_ : Tuple="gelu" , lowercase_ : int=0.1 , lowercase_ : Optional[Any]=0.1 , lowercase_ : Any=10 , lowercase_ : Tuple=0.02 , lowercase_ : Any="divided_space_time" , lowercase_ : Tuple=None , ):
lowercase_ : int = parent
lowercase_ : str = batch_size
lowercase_ : List[str] = image_size
lowercase_ : str = num_channels
lowercase_ : List[Any] = patch_size
lowercase_ : Optional[Any] = num_frames
lowercase_ : Dict = is_training
lowercase_ : int = use_labels
lowercase_ : List[str] = hidden_size
lowercase_ : Dict = num_hidden_layers
lowercase_ : Dict = num_attention_heads
lowercase_ : Any = intermediate_size
lowercase_ : Optional[int] = hidden_act
lowercase_ : Optional[Any] = hidden_dropout_prob
lowercase_ : List[Any] = attention_probs_dropout_prob
lowercase_ : Any = attention_type
lowercase_ : Union[str, Any] = initializer_range
lowercase_ : List[str] = scope
lowercase_ : Optional[int] = num_labels
# in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
lowercase_ : Dict = (image_size // patch_size) ** 2
lowercase_ : List[Any] = (num_frames) * self.num_patches_per_frame + 1
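# e.g. with the defaults above (image_size=10, patch_size=2, num_frames=2): 25 patches per frame -> 2 * 25 + 1 = 51 tokens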
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
lowercase_ : Optional[Any] = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
lowercase_ : int = None
if self.use_labels:
lowercase_ : Optional[int] = ids_tensor([self.batch_size] , self.num_labels )
lowercase_ : Optional[Any] = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
lowercase_ : int = TimesformerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
lowercase_ : Any = self.num_labels
return config
def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : List[Any] , lowercase_ : Dict , lowercase_ : List[str] ):
lowercase_ : Optional[Any] = TimesformerModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ : int = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Optional[int] , lowercase_ : List[str] , lowercase_ : str ):
lowercase_ : Dict = TimesformerForVideoClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ : int = model(lowercase_ )
# verify the logits shape
lowercase_ : List[Any] = torch.Size((self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
lowercase_ : List[str] = self.prepare_config_and_inputs()
lowercase_ , lowercase_ , lowercase_ : int = config_and_inputs
lowercase_ : List[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __magic_name__ ( _UpperCAmelCase, _UpperCAmelCase, unittest.TestCase):
UpperCamelCase__ = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
UpperCamelCase__ = (
{'''feature-extraction''': TimesformerModel, '''video-classification''': TimesformerForVideoClassification}
if is_torch_available()
else {}
)
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
lowercase_ : Any = TimesformerModelTester(self )
lowercase_ : Union[str, Any] = ConfigTester(
self , config_class=lowercase_ , has_text_modality=lowercase_ , hidden_size=37 )
def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : Any , lowercase_ : List[str] , lowercase_ : Tuple=False ):
lowercase_ : List[Any] = copy.deepcopy(lowercase_ )
if return_labels:
if model_class in get_values(lowercase_ ):
lowercase_ : List[str] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase_ )
return inputs_dict
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""TimeSformer does not use inputs_embeds""" )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
pass
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
lowercase_ , lowercase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : str = model_class(lowercase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase_ : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase_ , nn.Linear ) )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ , lowercase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : Dict = model_class(lowercase_ )
lowercase_ : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ : Union[str, Any] = [*signature.parameters.keys()]
lowercase_ : str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : str ):
lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : str ):
lowercase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*lowercase_ )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Any ):
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ : Any = TimesformerModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : str ):
if not self.has_attentions:
pass
else:
lowercase_ , lowercase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ : List[str] = True
for model_class in self.all_model_classes:
lowercase_ : str = self.model_tester.seq_length
lowercase_ : int = self.model_tester.num_frames
lowercase_ : int = True
lowercase_ : Any = False
lowercase_ : str = True
lowercase_ : int = model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
with torch.no_grad():
lowercase_ : List[Any] = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
lowercase_ : List[str] = outputs.attentions
self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowercase_ : List[str] = True
lowercase_ : str = model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
with torch.no_grad():
lowercase_ : Dict = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
lowercase_ : int = outputs.attentions
self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
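# with the tester defaults (seq_len=51, num_frames=2) each attention map is (num_heads, 26, 26), i.e. num_patches_per_frame + 1 on both token axes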
lowercase_ : Optional[Any] = len(lowercase_ )
# Check attention is always last and order is fine
lowercase_ : Tuple = True
lowercase_ : Dict = True
lowercase_ : str = model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
with torch.no_grad():
lowercase_ : str = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
self.assertEqual(out_len + 1 , len(lowercase_ ) )
lowercase_ : Optional[Any] = outputs.attentions
self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
def check_hidden_states_output(lowercase_ : Union[str, Any] , lowercase_ : Optional[int] , lowercase_ : Dict ):
lowercase_ : List[str] = model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
with torch.no_grad():
lowercase_ : Optional[Any] = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
lowercase_ : Dict = outputs.hidden_states
lowercase_ : List[Any] = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(lowercase_ ) , lowercase_ )
lowercase_ : List[Any] = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
lowercase_ , lowercase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : List[str] = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase_ : Optional[int] = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
def lowerCamelCase ( ) -> Optional[int]:
lowercase_ : List[str] = hf_hub_download(
repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" )
lowercase_ : List[Any] = np.load(UpperCAmelCase__ )
return list(UpperCAmelCase__ )
@require_torch
@require_vision
class __magic_name__ ( unittest.TestCase):
@cached_property
def SCREAMING_SNAKE_CASE_ ( self : str ):
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
lowercase_ : Any = TimesformerForVideoClassification.from_pretrained("""facebook/timesformer-base-finetuned-k400""" ).to(
lowercase_ )
lowercase_ : Optional[Any] = self.default_image_processor
lowercase_ : Any = prepare_video()
lowercase_ : Optional[int] = image_processor(video[:8] , return_tensors="""pt""" ).to(lowercase_ )
# forward pass
with torch.no_grad():
lowercase_ : Optional[Any] = model(**lowercase_ )
# verify the logits
lowercase_ : Any = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape , lowercase_ )
lowercase_ : int = torch.tensor([-0.30_16, -0.77_13, -0.42_05] ).to(lowercase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase_ , atol=1E-4 ) )
'''simple docstring'''
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
_lowercase : Optional[int] = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_lowercase : List[str] = concatenate_datasets
_lowercase : Any = DownloadConfig
_lowercase : List[Any] = DownloadManager
_lowercase : Union[str, Any] = DownloadMode
_lowercase : Any = DownloadConfig
_lowercase : str = DownloadMode
_lowercase : Tuple = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
'''simple docstring'''
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
_lowercase : Tuple = logging.get_logger(__name__)
# General docstring
_lowercase : List[str] = "RegNetConfig"
# Base docstring
_lowercase : Dict = "facebook/regnet-y-040"
_lowercase : Union[str, Any] = [1, 1088, 7, 7]
# Image classification docstring
_lowercase : Optional[Any] = "facebook/regnet-y-040"
_lowercase : Union[str, Any] = "tabby, tabby cat"
_lowercase : str = [
"facebook/regnet-y-040",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class __magic_name__ ( nn.Module):
def __init__( self : Union[str, Any] , lowercase_ : int , lowercase_ : int , lowercase_ : int = 3 , lowercase_ : int = 1 , lowercase_ : int = 1 , lowercase_ : Optional[str] = "relu" , ):
super().__init__()
lowercase_ : List[Any] = nn.Convad(
lowercase_ , lowercase_ , kernel_size=lowercase_ , stride=lowercase_ , padding=kernel_size // 2 , groups=lowercase_ , bias=lowercase_ , )
lowercase_ : str = nn.BatchNormad(lowercase_ )
lowercase_ : Optional[int] = ACTaFN[activation] if activation is not None else nn.Identity()
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : List[str] ):
lowercase_ : Dict = self.convolution(lowercase_ )
lowercase_ : str = self.normalization(lowercase_ )
lowercase_ : Optional[Any] = self.activation(lowercase_ )
return hidden_state
class __magic_name__ ( nn.Module):
def __init__( self : List[Any] , lowercase_ : RegNetConfig ):
super().__init__()
lowercase_ : str = RegNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act )
lowercase_ : Any = config.num_channels
def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : Optional[Any] ):
lowercase_ : List[str] = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
"""Make sure that the channel dimension of the pixel values match with the one set in the configuration.""" )
lowercase_ : Any = self.embedder(lowercase_ )
return hidden_state
class __magic_name__ ( nn.Module):
def __init__( self : Optional[int] , lowercase_ : int , lowercase_ : int , lowercase_ : int = 2 ):
super().__init__()
lowercase_ : Optional[Any] = nn.Convad(lowercase_ , lowercase_ , kernel_size=1 , stride=lowercase_ , bias=lowercase_ )
lowercase_ : Union[str, Any] = nn.BatchNormad(lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : Tensor ):
lowercase_ : Tuple = self.convolution(lowercase_ )
lowercase_ : str = self.normalization(lowercase_ )
return hidden_state
class __magic_name__ ( nn.Module):
def __init__( self : str , lowercase_ : int , lowercase_ : int ):
super().__init__()
lowercase_ : int = nn.AdaptiveAvgPoolad((1, 1) )
lowercase_ : int = nn.Sequential(
nn.Convad(lowercase_ , lowercase_ , kernel_size=1 ) , nn.ReLU() , nn.Convad(lowercase_ , lowercase_ , kernel_size=1 ) , nn.Sigmoid() , )
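# squeeze-and-excitation: the pooled features pass through a channel bottleneck and a sigmoid to produce per-channel attention weights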
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : Any ):
# b c h w -> b c 1 1
lowercase_ : List[str] = self.pooler(lowercase_ )
lowercase_ : Optional[int] = self.attention(lowercase_ )
lowercase_ : Any = hidden_state * attention
return hidden_state
class __magic_name__ ( nn.Module):
def __init__( self : Optional[int] , lowercase_ : RegNetConfig , lowercase_ : int , lowercase_ : int , lowercase_ : int = 1 ):
super().__init__()
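# a projection shortcut is only needed when the residual branch changes the channel count or the spatial resolution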
lowercase_ : List[Any] = in_channels != out_channels or stride != 1
lowercase_ : Optional[int] = max(1 , out_channels // config.groups_width )
lowercase_ : Dict = (
RegNetShortCut(lowercase_ , lowercase_ , stride=lowercase_ ) if should_apply_shortcut else nn.Identity()
)
lowercase_ : List[Any] = nn.Sequential(
RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(lowercase_ , lowercase_ , stride=lowercase_ , groups=lowercase_ , activation=config.hidden_act ) , RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=lowercase_ ) , )
lowercase_ : int = ACTaFN[config.hidden_act]
def SCREAMING_SNAKE_CASE_ ( self : Any , lowercase_ : Any ):
lowercase_ : Any = hidden_state
lowercase_ : Union[str, Any] = self.layer(lowercase_ )
lowercase_ : Union[str, Any] = self.shortcut(lowercase_ )
hidden_state += residual
lowercase_ : str = self.activation(lowercase_ )
return hidden_state
class __magic_name__ ( nn.Module):
def __init__( self : Optional[Any] , lowercase_ : RegNetConfig , lowercase_ : int , lowercase_ : int , lowercase_ : int = 1 ):
super().__init__()
lowercase_ : str = in_channels != out_channels or stride != 1
lowercase_ : int = max(1 , out_channels // config.groups_width )
lowercase_ : int = (
RegNetShortCut(lowercase_ , lowercase_ , stride=lowercase_ ) if should_apply_shortcut else nn.Identity()
)
lowercase_ : Union[str, Any] = nn.Sequential(
RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(lowercase_ , lowercase_ , stride=lowercase_ , groups=lowercase_ , activation=config.hidden_act ) , RegNetSELayer(lowercase_ , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=lowercase_ ) , )
lowercase_ : Optional[int] = ACTaFN[config.hidden_act]
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Union[str, Any] ):
lowercase_ : Optional[int] = hidden_state
lowercase_ : str = self.layer(lowercase_ )
lowercase_ : int = self.shortcut(lowercase_ )
hidden_state += residual
lowercase_ : Optional[Any] = self.activation(lowercase_ )
return hidden_state
class __magic_name__ ( nn.Module):
def __init__( self : str , lowercase_ : RegNetConfig , lowercase_ : int , lowercase_ : int , lowercase_ : int = 2 , lowercase_ : int = 2 , ):
super().__init__()
lowercase_ : str = RegNetXLayer if config.layer_type == """x""" else RegNetYLayer
lowercase_ : str = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(
lowercase_ , lowercase_ , lowercase_ , stride=lowercase_ , ) , *[layer(lowercase_ , lowercase_ , lowercase_ ) for _ in range(depth - 1 )] , )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : List[str] ):
lowercase_ : Tuple = self.layers(lowercase_ )
return hidden_state
class __magic_name__ ( nn.Module):
def __init__( self : Dict , lowercase_ : RegNetConfig ):
super().__init__()
lowercase_ : Optional[Any] = nn.ModuleList([] )
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
RegNetStage(
lowercase_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
lowercase_ : Optional[Any] = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(lowercase_ , config.depths[1:] ):
self.stages.append(RegNetStage(lowercase_ , lowercase_ , lowercase_ , depth=lowercase_ ) )
def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : Tensor , lowercase_ : bool = False , lowercase_ : bool = True ):
lowercase_ : Tuple = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
lowercase_ : Union[str, Any] = hidden_states + (hidden_state,)
lowercase_ : Dict = stage_module(lowercase_ )
if output_hidden_states:
lowercase_ : Optional[Any] = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=lowercase_ , hidden_states=lowercase_ )
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = RegNetConfig
UpperCamelCase__ = '''regnet'''
UpperCamelCase__ = '''pixel_values'''
UpperCamelCase__ = True
def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : Optional[Any] ):
if isinstance(lowercase_ , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode="""fan_out""" , nonlinearity="""relu""" )
elif isinstance(lowercase_ , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : Optional[int] , lowercase_ : Any=False ):
if isinstance(lowercase_ , lowercase_ ):
lowercase_ : List[str] = value
_lowercase : Dict = r"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
_lowercase : Any = r"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
'''The bare RegNet model outputting raw features without any specific head on top.''', _UpperCAmelCase, )
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class __magic_name__ ( _UpperCAmelCase):
def __init__( self : Any , lowercase_ : Any ):
super().__init__(lowercase_ )
lowercase_ : List[str] = config
lowercase_ : Union[str, Any] = RegNetEmbeddings(lowercase_ )
lowercase_ : Union[str, Any] = RegNetEncoder(lowercase_ )
lowercase_ : str = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowercase_ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowercase_ , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : Tensor , lowercase_ : Optional[bool] = None , lowercase_ : Optional[bool] = None ):
lowercase_ : List[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase_ : Tuple = return_dict if return_dict is not None else self.config.use_return_dict
lowercase_ : str = self.embedder(lowercase_ )
lowercase_ : Optional[Any] = self.encoder(
lowercase_ , output_hidden_states=lowercase_ , return_dict=lowercase_ )
lowercase_ : List[Any] = encoder_outputs[0]
lowercase_ : str = self.pooler(lowercase_ )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowercase_ , pooler_output=lowercase_ , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
'''
RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
''', _UpperCAmelCase, )
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class __magic_name__ ( _UpperCAmelCase):
def __init__( self : Dict , lowercase_ : str ):
super().__init__(lowercase_ )
lowercase_ : Any = config.num_labels
lowercase_ : List[str] = RegNetModel(lowercase_ )
# classification head
lowercase_ : Any = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowercase_ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowercase_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Optional[torch.FloatTensor] = None , lowercase_ : Optional[torch.LongTensor] = None , lowercase_ : Optional[bool] = None , lowercase_ : Optional[bool] = None , ):
lowercase_ : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict
lowercase_ : Optional[int] = self.regnet(lowercase_ , output_hidden_states=lowercase_ , return_dict=lowercase_ )
lowercase_ : Optional[int] = outputs.pooler_output if return_dict else outputs[1]
lowercase_ : List[Any] = self.classifier(lowercase_ )
lowercase_ : Optional[int] = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
lowercase_ : Optional[int] = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
lowercase_ : str = """single_label_classification"""
else:
lowercase_ : str = """multi_label_classification"""
if self.config.problem_type == "regression":
lowercase_ : str = MSELoss()
if self.num_labels == 1:
lowercase_ : List[Any] = loss_fct(logits.squeeze() , labels.squeeze() )
else:
lowercase_ : List[str] = loss_fct(lowercase_ , lowercase_ )
elif self.config.problem_type == "single_label_classification":
lowercase_ : Optional[int] = CrossEntropyLoss()
lowercase_ : Union[str, Any] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
lowercase_ : Dict = BCEWithLogitsLoss()
lowercase_ : Tuple = loss_fct(lowercase_ , lowercase_ )
if not return_dict:
lowercase_ : Tuple = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=lowercase_ , logits=lowercase_ , hidden_states=outputs.hidden_states )
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : Union[str, Any] = logging.get_logger(__name__)
_lowercase : Union[str, Any] = {
"vinvino02/glpn-kitti": "https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json",
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = '''glpn'''
def __init__( self : List[Any] , lowercase_ : Optional[int]=3 , lowercase_ : Tuple=4 , lowercase_ : Any=[2, 2, 2, 2] , lowercase_ : Optional[int]=[8, 4, 2, 1] , lowercase_ : Tuple=[32, 64, 160, 256] , lowercase_ : Optional[int]=[7, 3, 3, 3] , lowercase_ : Dict=[4, 2, 2, 2] , lowercase_ : List[Any]=[1, 2, 5, 8] , lowercase_ : str=[4, 4, 4, 4] , lowercase_ : str="gelu" , lowercase_ : Tuple=0.0 , lowercase_ : str=0.0 , lowercase_ : Union[str, Any]=0.02 , lowercase_ : Dict=0.1 , lowercase_ : Union[str, Any]=1E-6 , lowercase_ : int=64 , lowercase_ : str=10 , lowercase_ : Union[str, Any]=-1 , **lowercase_ : Optional[int] , ):
super().__init__(**lowercase_ )
lowercase_ : int = num_channels
lowercase_ : int = num_encoder_blocks
lowercase_ : Union[str, Any] = depths
lowercase_ : List[Any] = sr_ratios
lowercase_ : List[Any] = hidden_sizes
lowercase_ : Any = patch_sizes
lowercase_ : Union[str, Any] = strides
lowercase_ : List[Any] = mlp_ratios
lowercase_ : Optional[int] = num_attention_heads
lowercase_ : Union[str, Any] = hidden_act
lowercase_ : List[Any] = hidden_dropout_prob
lowercase_ : Dict = attention_probs_dropout_prob
lowercase_ : str = initializer_range
lowercase_ : Tuple = drop_path_rate
lowercase_ : Any = layer_norm_eps
lowercase_ : List[Any] = decoder_hidden_size
lowercase_ : Union[str, Any] = max_depth
lowercase_ : Dict = head_in_index
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowercase : List[Any] = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : int = [
"FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FocalNetForImageClassification",
"FocalNetForMaskedImageModeling",
"FocalNetBackbone",
"FocalNetModel",
"FocalNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
_lowercase : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def lowerCamelCase ( UpperCAmelCase__ : Any , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[Any] ) -> List[Any]:
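# right-to-left replace: split on the last `count` occurrences of the old substring and re-join with the new one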
lowercase_ : List[Any] = s.rsplit(UpperCAmelCase__ , UpperCAmelCase__ )
return new.join(UpperCAmelCase__ )
def lowerCamelCase ( UpperCAmelCase__ : Dict ) -> int:
# encoder.embeddings are double copied in original FLAVA
return sum(param.float().sum() if """encoder.embeddings""" not in key else 0 for key, param in state_dict.items() )
def lowerCamelCase ( UpperCAmelCase__ : Optional[Any] ) -> Union[str, Any]:
lowercase_ : int = {}
lowercase_ : Any = ["""group_1""", """group_2""", """group_3""", """group_4"""]
for key, value in state_dict.items():
for group_key in group_keys:
if group_key in key:
lowercase_ : Tuple = key.replace(F'''{group_key}.''' , F'''{group_key}.group.''' )
if "res_path" in key:
lowercase_ : Any = key.replace("""res_path.""" , """res_path.path.""" )
if key.endswith(""".w""" ):
lowercase_ : int = rreplace(UpperCAmelCase__ , """.w""" , """.weight""" , 1 )
if key.endswith(""".b""" ):
lowercase_ : int = rreplace(UpperCAmelCase__ , """.b""" , """.bias""" , 1 )
lowercase_ : int = value.float()
return upgrade
@torch.no_grad()
def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : int , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : Optional[Any]=True ) -> Tuple:
from dall_e import Encoder
lowercase_ : Tuple = Encoder()
if os.path.exists(UpperCAmelCase__ ):
lowercase_ : Tuple = torch.load(UpperCAmelCase__ )
else:
lowercase_ : Dict = torch.hub.load_state_dict_from_url(UpperCAmelCase__ )
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
lowercase_ : Dict = ckpt.state_dict()
encoder.load_state_dict(UpperCAmelCase__ )
if config_path is not None:
lowercase_ : Dict = FlavaImageCodebookConfig.from_pretrained(UpperCAmelCase__ )
else:
lowercase_ : Any = FlavaImageCodebookConfig()
lowercase_ : Optional[int] = FlavaImageCodebook(UpperCAmelCase__ ).eval()
lowercase_ : int = encoder.state_dict()
lowercase_ : Optional[Any] = upgrade_state_dict(UpperCAmelCase__ )
hf_model.load_state_dict(UpperCAmelCase__ )
lowercase_ : int = hf_model.state_dict()
lowercase_ : Dict = count_parameters(UpperCAmelCase__ )
lowercase_ : List[str] = count_parameters(UpperCAmelCase__ )
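# sanity check: the summed parameter values of the original encoder and the converted HF model should match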
assert torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1e-3 )
if save_checkpoint:
hf_model.save_pretrained(UpperCAmelCase__ )
else:
return hf_state_dict
if __name__ == "__main__":
_lowercase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
_lowercase : Optional[int] = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
'''simple docstring'''
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize("""dataset_size""" , [None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize("""input_in_memory_max_size""" , ["""default""", 0, 100 * 2**20, 900 * 2**20] )
def lowerCamelCase ( UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str ) -> Any:
if input_in_memory_max_size != "default":
monkeypatch.setattr(datasets.config , """IN_MEMORY_MAX_SIZE""" , UpperCAmelCase__ )
lowercase_ : List[Any] = datasets.config.IN_MEMORY_MAX_SIZE
if input_in_memory_max_size == "default":
assert in_memory_max_size == 0
else:
assert in_memory_max_size == input_in_memory_max_size
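# a dataset only counts as "small" when both sizes are known/non-zero and the dataset fits under the in-memory limit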
if dataset_size and in_memory_max_size:
lowercase_ : str = dataset_size < in_memory_max_size
else:
lowercase_ : List[Any] = False
lowercase_ : Any = is_small_dataset(UpperCAmelCase__ )
assert result == expected
'''simple docstring'''
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCamelCase ( UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : List[Any] ) -> Tuple:
# Initialise PyTorch model
lowercase_ : List[Any] = RemBertConfig.from_json_file(UpperCAmelCase__ )
print("""Building PyTorch model from configuration: {}""".format(str(UpperCAmelCase__ ) ) )
lowercase_ : Any = RemBertModel(UpperCAmelCase__ )
# Load weights from tf checkpoint
load_tf_weights_in_rembert(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
# Save pytorch-model
print("""Save PyTorch model to {}""".format(UpperCAmelCase__ ) )
torch.save(model.state_dict() , UpperCAmelCase__ )
if __name__ == "__main__":
_lowercase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--rembert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained RemBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
_lowercase : str = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class __magic_name__ ( unittest.TestCase):
@slow
def SCREAMING_SNAKE_CASE_ ( self : str ):
lowercase_ : Any = TFAutoModelForSeqaSeqLM.from_pretrained("""google/mt5-small""" )
lowercase_ : Dict = AutoTokenizer.from_pretrained("""google/mt5-small""" )
lowercase_ : Union[str, Any] = tokenizer("""Hello there""" , return_tensors="""tf""" ).input_ids
lowercase_ : List[str] = tokenizer("""Hi I am""" , return_tensors="""tf""" ).input_ids
lowercase_ : Optional[Any] = model(lowercase_ , labels=lowercase_ ).loss
lowercase_ : Optional[int] = -tf.math.reduce_mean(lowercase_ ).numpy()
lowercase_ : Optional[int] = -21.22_81_68
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2E-4 )
'''simple docstring'''
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
_lowercase : Optional[Any] = datasets.logging.get_logger(__name__)
_lowercase : Union[str, Any] = "\\n@inproceedings{bleurt,\n title={BLEURT: Learning Robust Metrics for Text Generation},\n author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},\n booktitle={ACL},\n year={2020},\n url={https://arxiv.org/abs/2004.04696}\n}\n"
_lowercase : Optional[Any] = "\\nBLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)\nand then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune\nit for your specific application (the latter is expected to perform better).\n\nSee the project's README at https://github.com/google-research/bleurt#readme for more information.\n"
_lowercase : int = "\nBLEURT score.\n\nArgs:\n `predictions` (list of str): prediction/candidate sentences\n `references` (list of str): reference sentences\n `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.\n\nReturns:\n 'scores': List of scores.\nExamples:\n\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> bleurt = datasets.load_metric(\"bleurt\")\n >>> results = bleurt.compute(predictions=predictions, references=references)\n >>> print([round(v, 2) for v in results[\"scores\"]])\n [1.03, 1.04]\n"
_lowercase : Dict = {
"bleurt-tiny-128": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip",
"bleurt-tiny-512": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip",
"bleurt-base-128": "https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip",
"bleurt-base-512": "https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip",
"bleurt-large-128": "https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip",
"bleurt-large-512": "https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip",
"BLEURT-20-D3": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip",
"BLEURT-20-D6": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip",
"BLEURT-20-D12": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip",
"BLEURT-20": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip",
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class __magic_name__ ( datasets.Metric):
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/google-research/bleurt""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/google-research/bleurt"""] , reference_urls=["""https://github.com/google-research/bleurt""", """https://arxiv.org/abs/2004.04696"""] , )
def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : Any ):
# check that config name specifies a valid BLEURT model
if self.config_name == "default":
logger.warning(
"""Using default BLEURT-Base checkpoint for sequence maximum length 128. """
"""You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512').""" )
lowercase_ : int = """bleurt-base-128"""
if self.config_name.lower() in CHECKPOINT_URLS:
lowercase_ : Dict = self.config_name.lower()
elif self.config_name.upper() in CHECKPOINT_URLS:
lowercase_ : Dict = self.config_name.upper()
else:
raise KeyError(
f'''{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}''' )
# download the model checkpoint specified by self.config_name and set up the scorer
lowercase_ : List[Any] = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name] )
lowercase_ : Optional[Any] = score.BleurtScorer(os.path.join(lowercase_ , lowercase_ ) )
def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : List[Any] , lowercase_ : Dict ):
lowercase_ : int = self.scorer.score(references=lowercase_ , candidates=lowercase_ )
return {"scores": scores}
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def lowerCamelCase ( UpperCAmelCase__ : Callable , UpperCAmelCase__ : float , UpperCAmelCase__ : float , UpperCAmelCase__ : float , UpperCAmelCase__ : float ) -> np.array:
lowercase_ : Any = int(np.ceil((x_end - xa) / step_size ) )
lowercase_ : List[Any] = np.zeros((n + 1,) )
lowercase_ : List[Any] = ya
lowercase_ : List[str] = xa
for k in range(UpperCAmelCase__ ):
lowercase_ : Optional[Any] = y[k] + step_size * ode_func(UpperCAmelCase__ , y[k] )
lowercase_ : List[Any] = y[k] + (
(step_size / 2) * (ode_func(UpperCAmelCase__ , y[k] ) + ode_func(x + step_size , UpperCAmelCase__ ))
)
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
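# The solver above is Heun's method (the explicit trapezoidal rule): an Euler
# predictor followed by a corrector that averages the slopes at both ends of the
# step. A self-contained sketch of the same update for dy/dx = y with y(0) = 1,
# checked against the exact solution e^x (illustrative only, not the module's API):
import math
def _heun_sketch(f, x0, y0, x_end, h):
    n = round((x_end - x0) / h)
    x, y = x0, y0
    for _ in range(n):
        predictor = y + h * f(x, y)                        # Euler predictor
        y = y + (h / 2) * (f(x, y) + f(x + h, predictor))  # trapezoidal corrector
        x += h
    return y
_approx = _heun_sketch(lambda x, y: y, 0.0, 1.0, 1.0, 0.01)
assert abs(_approx - math.e) < 1e-3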
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class __magic_name__ :
def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : Optional[Any] , lowercase_ : Any , lowercase_ : int ):
return None
class __magic_name__ :
def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : Dict , lowercase_ : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : List[Any] ):
return None
class __magic_name__ ( unittest.TestCase):
UpperCamelCase__ = [
# (model_name, model_kwargs)
('''bert-base-cased''', {}),
('''gpt2''', {'''use_cache''': False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(lowercase_ , """tf""" , 12 , **lowercase_ )
@require_torch
@slow
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(lowercase_ , """pt""" , 12 , **lowercase_ )
@require_torch
@slow
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
from transformers import BertModel
lowercase_ : str = ["""[UNK]""", """[SEP]""", """[CLS]""", """[PAD]""", """[MASK]""", """some""", """other""", """words"""]
with NamedTemporaryFile(mode="""w+t""" ) as vocab_file:
vocab_file.write("""\n""".join(lowercase_ ) )
vocab_file.flush()
lowercase_ : str = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
lowercase_ : Any = BertModel(BertConfig(vocab_size=len(lowercase_ ) ) )
model.save_pretrained(lowercase_ )
self._test_export(lowercase_ , """pt""" , 12 , lowercase_ )
@require_tf
@slow
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowercase_ : Optional[int] = self._test_export(lowercase_ , """tf""" , 12 , **lowercase_ )
lowercase_ : Dict = quantize(Path(lowercase_ ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(lowercase_ ).stat().st_size:
self.fail("""Quantized model is bigger than initial ONNX model""" )
@require_torch
@slow
def SCREAMING_SNAKE_CASE_ ( self : Any ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowercase_ : Union[str, Any] = self._test_export(lowercase_ , """pt""" , 12 , **lowercase_ )
lowercase_ : List[Any] = quantize(lowercase_ )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(lowercase_ ).stat().st_size:
self.fail("""Quantized model is bigger than initial ONNX model""" )
def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : List[str] , lowercase_ : int , lowercase_ : List[str] , lowercase_ : List[str]=None , **lowercase_ : Tuple ):
try:
# Compute path
with TemporaryDirectory() as tempdir:
lowercase_ : List[str] = Path(lowercase_ ).joinpath("""model.onnx""" )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , **lowercase_ )
return path
except Exception as e:
self.fail(lowercase_ )
@require_torch
@require_tokenizers
@slow
def SCREAMING_SNAKE_CASE_ ( self : int ):
from transformers import BertModel
lowercase_ : List[str] = BertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) )
lowercase_ : Any = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" )
self._test_infer_dynamic_axis(lowercase_ , lowercase_ , """pt""" )
@require_tf
@require_tokenizers
@slow
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
from transformers import TFBertModel
lowercase_ : Any = TFBertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) )
lowercase_ : str = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" )
self._test_infer_dynamic_axis(lowercase_ , lowercase_ , """tf""" )
def SCREAMING_SNAKE_CASE_ ( self : Any , lowercase_ : Tuple , lowercase_ : Union[str, Any] , lowercase_ : Optional[int] ):
lowercase_ : Optional[int] = FeatureExtractionPipeline(lowercase_ , lowercase_ )
lowercase_ : List[Any] = ["""input_ids""", """token_type_ids""", """attention_mask""", """output_0""", """output_1"""]
lowercase_ , lowercase_ , lowercase_ , lowercase_ : Any = infer_shapes(lowercase_ , lowercase_ )
# Assert all variables are present
self.assertEqual(len(lowercase_ ) , len(lowercase_ ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3] , lowercase_ )
self.assertSequenceEqual(variable_names[3:] , lowercase_ )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] , {0: """batch""", 1: """sequence"""} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes["""output_0"""] , {0: """batch""", 1: """sequence"""} )
self.assertDictEqual(shapes["""output_1"""] , {0: """batch"""} )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
lowercase_ : int = ["""input_ids""", """attention_mask""", """token_type_ids"""]
lowercase_ : List[Any] = {"""input_ids""": [1, 2, 3, 4], """attention_mask""": [0, 0, 0, 0], """token_type_ids""": [1, 1, 1, 1]}
lowercase_ , lowercase_ : Optional[Any] = ensure_valid_input(FuncContiguousArgs() , lowercase_ , lowercase_ )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(lowercase_ ) , 3 )
# Should have exactly the same input names
self.assertEqual(set(lowercase_ ) , set(lowercase_ ) )
        # Parameters should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(lowercase_ , (tokens["""input_ids"""], tokens["""token_type_ids"""], tokens["""attention_mask"""]) )
        # Generated args are interleaved with other args (for instance the parameter "past" in GPT2)
lowercase_ , lowercase_ : List[Any] = ensure_valid_input(FuncNonContiguousArgs() , lowercase_ , lowercase_ )
        # Should have exactly one arg (everything before the argument that is not provided, "some_other_args")
self.assertEqual(len(lowercase_ ) , 1 )
self.assertEqual(len(lowercase_ ) , 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] , tokens["""input_ids"""] )
self.assertEqual(ordered_input_names[0] , """input_ids""" )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ : Tuple = generate_identified_filename(Path("""/home/something/my_fake_model.onnx""" ) , """-test""" )
self.assertEqual("""/home/something/my_fake_model-test.onnx""" , generated.as_posix() )
'''simple docstring'''
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float = 1 / sqrt(2 ) ) -> IIRFilter:
lowercase_ : str = tau * frequency / samplerate
lowercase_ : Tuple = sin(UpperCAmelCase__ )
lowercase_ : int = cos(UpperCAmelCase__ )
lowercase_ : Any = _sin / (2 * q_factor)
lowercase_ : Dict = (1 - _cos) / 2
lowercase_ : Optional[int] = 1 - _cos
lowercase_ : Dict = 1 + alpha
lowercase_ : List[Any] = -2 * _cos
lowercase_ : Union[str, Any] = 1 - alpha
lowercase_ : List[Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float = 1 / sqrt(2 ) ) -> IIRFilter:
lowercase_ : str = tau * frequency / samplerate
lowercase_ : Optional[int] = sin(UpperCAmelCase__ )
lowercase_ : Dict = cos(UpperCAmelCase__ )
lowercase_ : Optional[int] = _sin / (2 * q_factor)
lowercase_ : Dict = (1 + _cos) / 2
lowercase_ : str = -1 - _cos
lowercase_ : Dict = 1 + alpha
lowercase_ : Optional[Any] = -2 * _cos
lowercase_ : List[Any] = 1 - alpha
lowercase_ : Union[str, Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float = 1 / sqrt(2 ) ) -> IIRFilter:
lowercase_ : int = tau * frequency / samplerate
lowercase_ : int = sin(UpperCAmelCase__ )
lowercase_ : Union[str, Any] = cos(UpperCAmelCase__ )
lowercase_ : str = _sin / (2 * q_factor)
lowercase_ : str = _sin / 2
lowercase_ : Any = 0
lowercase_ : Optional[Any] = -ba
lowercase_ : Dict = 1 + alpha
lowercase_ : Union[str, Any] = -2 * _cos
lowercase_ : Union[str, Any] = 1 - alpha
lowercase_ : Tuple = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float = 1 / sqrt(2 ) ) -> IIRFilter:
lowercase_ : List[str] = tau * frequency / samplerate
lowercase_ : Any = sin(UpperCAmelCase__ )
lowercase_ : List[Any] = cos(UpperCAmelCase__ )
lowercase_ : Optional[Any] = _sin / (2 * q_factor)
lowercase_ : Any = 1 - alpha
lowercase_ : Optional[Any] = -2 * _cos
lowercase_ : Optional[int] = 1 + alpha
lowercase_ : Dict = IIRFilter(2 )
filt.set_coefficients([ba, ba, ba] , [ba, ba, ba] )
return filt
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float , UpperCAmelCase__ : float = 1 / sqrt(2 ) , ) -> IIRFilter:
lowercase_ : Dict = tau * frequency / samplerate
lowercase_ : Tuple = sin(UpperCAmelCase__ )
lowercase_ : List[Any] = cos(UpperCAmelCase__ )
lowercase_ : List[Any] = _sin / (2 * q_factor)
lowercase_ : Any = 10 ** (gain_db / 40)
lowercase_ : List[str] = 1 + alpha * big_a
lowercase_ : List[Any] = -2 * _cos
lowercase_ : Dict = 1 - alpha * big_a
lowercase_ : str = 1 + alpha / big_a
lowercase_ : List[str] = -2 * _cos
lowercase_ : Tuple = 1 - alpha / big_a
lowercase_ : Any = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float , UpperCAmelCase__ : float = 1 / sqrt(2 ) , ) -> IIRFilter:
lowercase_ : Dict = tau * frequency / samplerate
lowercase_ : Union[str, Any] = sin(UpperCAmelCase__ )
lowercase_ : Any = cos(UpperCAmelCase__ )
lowercase_ : Any = _sin / (2 * q_factor)
lowercase_ : Any = 10 ** (gain_db / 40)
lowercase_ : Any = (big_a + 1) - (big_a - 1) * _cos
lowercase_ : int = (big_a + 1) + (big_a - 1) * _cos
lowercase_ : Tuple = (big_a - 1) - (big_a + 1) * _cos
lowercase_ : Optional[Any] = (big_a - 1) + (big_a + 1) * _cos
lowercase_ : int = 2 * sqrt(UpperCAmelCase__ ) * alpha
lowercase_ : Tuple = big_a * (pmc + aaa)
lowercase_ : List[str] = 2 * big_a * mpc
lowercase_ : Union[str, Any] = big_a * (pmc - aaa)
lowercase_ : Optional[int] = ppmc + aaa
lowercase_ : Optional[int] = -2 * pmpc
lowercase_ : Any = ppmc - aaa
lowercase_ : Optional[int] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float , UpperCAmelCase__ : float = 1 / sqrt(2 ) , ) -> IIRFilter:
lowercase_ : str = tau * frequency / samplerate
lowercase_ : int = sin(UpperCAmelCase__ )
lowercase_ : int = cos(UpperCAmelCase__ )
lowercase_ : Dict = _sin / (2 * q_factor)
lowercase_ : Union[str, Any] = 10 ** (gain_db / 40)
lowercase_ : Union[str, Any] = (big_a + 1) - (big_a - 1) * _cos
lowercase_ : Optional[int] = (big_a + 1) + (big_a - 1) * _cos
lowercase_ : Any = (big_a - 1) - (big_a + 1) * _cos
lowercase_ : str = (big_a - 1) + (big_a + 1) * _cos
lowercase_ : Optional[int] = 2 * sqrt(UpperCAmelCase__ ) * alpha
lowercase_ : Tuple = big_a * (ppmc + aaa)
lowercase_ : List[Any] = -2 * big_a * pmpc
lowercase_ : Optional[Any] = big_a * (ppmc - aaa)
lowercase_ : Optional[Any] = pmc + aaa
lowercase_ : int = 2 * mpc
lowercase_ : Tuple = pmc - aaa
lowercase_ : Union[str, Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
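# Each helper above fills in a classic second-order (biquad) coefficient recipe from
# frequency, sample rate, Q and (for the shelf/peak variants) gain, then hands the two
# coefficient triples to IIRFilter.set_coefficients. A standalone sanity check of the
# low-pass recipe used by the first helper (illustrative only; here b* denotes the
# numerator and a* the denominator of H(z)): a low-pass biquad must have unit gain at
# DC, i.e. (b0 + b1 + b2) / (a0 + a1 + a2) == 1.
from math import cos, sin, sqrt, tau
def _lowpass_coeffs_sketch(frequency, samplerate, q_factor=1 / sqrt(2)):
    w0 = tau * frequency / samplerate
    alpha = sin(w0) / (2 * q_factor)
    b0 = b2 = (1 - cos(w0)) / 2
    b1 = 1 - cos(w0)
    a0, a1, a2 = 1 + alpha, -2 * cos(w0), 1 - alpha
    return (a0, a1, a2), (b0, b1, b2)
(_a0, _a1, _a2), (_b0, _b1, _b2) = _lowpass_coeffs_sketch(1_000, 48_000)
assert abs((_b0 + _b1 + _b2) / (_a0 + _a1 + _a2) - 1.0) < 1e-9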
'''simple docstring'''
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
_lowercase : Optional[Any] = [
"Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the"
" final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe"
" depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.",
"The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal"
" accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's"
" founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the"
" body.",
"Amnesty International releases its annual report on the death penalty. The report catalogs the use of"
" state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the"
" world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital"
" punishment.",
]
_lowercase : List[Any] = [
"Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ."
" Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz"
" had informed his Lufthansa training school of an episode of severe depression, airline says .",
"Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ."
" Israel and the United States opposed the move, which could open the door to war crimes investigations against"
" Israelis .",
"Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to"
" death . Organization claims that governments around the world are using the threat of terrorism to advance"
" executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death"
" sentences up by 28% .",
]
def lowerCamelCase ( ) -> List[str]:
lowercase_ : str = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , bootstrap_aggregation=UpperCAmelCase__ , rouge_keys=["""rouge2""", """rougeL"""] )
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase_ : int = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , bootstrap_aggregation=UpperCAmelCase__ , rouge_keys=["""rouge2"""] )
assert (
pd.DataFrame(no_aggregation["""rouge2"""] ).fmeasure.mean()
== pd.DataFrame(no_aggregation_just_ra["""rouge2"""] ).fmeasure.mean()
)
def lowerCamelCase ( ) -> Optional[Any]:
lowercase_ : Tuple = """rougeLsum"""
lowercase_ : Optional[Any] = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=[k] )[k]
lowercase_ : Optional[Any] = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=[k] )[k]
assert score > score_no_sep
def lowerCamelCase ( ) -> List[Any]:
lowercase_ : Optional[int] = ["""rouge1""", """rouge2""", """rougeL"""]
lowercase_ : Tuple = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=UpperCAmelCase__ )
lowercase_ : Tuple = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=UpperCAmelCase__ )
assert score_sep == score_no_sep
def lowerCamelCase ( ) -> Optional[Any]:
lowercase_ : Union[str, Any] = [
"""Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.""",
"""Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .""",
]
lowercase_ : List[str] = [
"""Margot Frank, died in 1945, a month earlier than previously thought.""",
"""Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"""
""" the final seconds on board Flight 9525.""",
]
assert calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ ) == calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ )
def lowerCamelCase ( ) -> Union[str, Any]:
lowercase_ : Optional[Any] = [
"""\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" """
]
lowercase_ : List[Any] = [
""" Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."""
]
lowercase_ : Optional[int] = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , rouge_keys=["""rougeLsum"""] , newline_sep=UpperCAmelCase__ )["""rougeLsum"""]
lowercase_ : List[str] = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , rouge_keys=["""rougeLsum"""] )["""rougeLsum"""]
assert new_score > prev_score
def lowerCamelCase ( ) -> Tuple:
lowercase_ : Optional[int] = Path("""examples/seq2seq/test_data/wmt_en_ro""" )
lowercase_ : List[Any] = calculate_rouge_path(data_dir.joinpath("""test.source""" ) , data_dir.joinpath("""test.target""" ) )
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase_ : Union[str, Any] = calculate_rouge_path(
data_dir.joinpath("""test.source""" ) , data_dir.joinpath("""test.target""" ) , bootstrap_aggregation=UpperCAmelCase__ )
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
'''simple docstring'''
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
_lowercase : str = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class __magic_name__ ( datasets.BuilderConfig):
UpperCamelCase__ = None
def lowerCamelCase ( UpperCAmelCase__ : "pyspark.sql.DataFrame" , UpperCAmelCase__ : List[int] , ) -> str:
import pyspark
def generate_fn():
lowercase_ : List[str] = df.select("""*""" , pyspark.sql.functions.spark_partition_id().alias("""part_id""" ) )
for partition_id in partition_order:
lowercase_ : int = df_with_partition_id.select("""*""" ).where(F'''part_id = {partition_id}''' ).drop("""part_id""" )
lowercase_ : Any = partition_df.collect()
lowercase_ : Dict = 0
for row in rows:
yield F'''{partition_id}_{row_id}''', row.asDict()
row_id += 1
return generate_fn
class __magic_name__ ( _BaseExamplesIterable):
def __init__( self : int , lowercase_ : "pyspark.sql.DataFrame" , lowercase_ : Optional[int]=None , ):
lowercase_ : Dict = df
lowercase_ : Optional[Any] = partition_order or range(self.df.rdd.getNumPartitions() )
lowercase_ : Optional[Any] = _generate_iterable_examples(self.df , self.partition_order )
def __iter__( self : List[Any] ):
yield from self.generate_examples_fn()
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : np.random.Generator ):
lowercase_ : str = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(lowercase_ )
return SparkExamplesIterable(self.df , partition_order=lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : int , lowercase_ : int ):
lowercase_ : str = self.split_shard_indices_by_worker(lowercase_ , lowercase_ )
return SparkExamplesIterable(self.df , partition_order=lowercase_ )
@property
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
return len(self.partition_order )
class __magic_name__ ( datasets.DatasetBuilder):
UpperCamelCase__ = SparkConfig
def __init__( self : Tuple , lowercase_ : "pyspark.sql.DataFrame" , lowercase_ : str = None , lowercase_ : str = None , **lowercase_ : str , ):
import pyspark
lowercase_ : str = pyspark.sql.SparkSession.builder.getOrCreate()
lowercase_ : Optional[int] = df
lowercase_ : List[str] = working_dir
super().__init__(
cache_dir=lowercase_ , config_name=str(self.df.semanticHash() ) , **lowercase_ , )
def SCREAMING_SNAKE_CASE_ ( self : str ):
# Returns the path of the created file.
def create_cache_and_write_probe(lowercase_ : str ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir , exist_ok=lowercase_ )
lowercase_ : List[str] = os.path.join(self._cache_dir , """fs_test""" + uuid.uuida().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(lowercase_ , """a""" )
return [probe_file]
if self._spark.conf.get("""spark.master""" , """""" ).startswith("""local""" ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
lowercase_ : str = (
self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(lowercase_ ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
"""When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir""" )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
return datasets.DatasetInfo(features=self.config.features )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : datasets.download.download_manager.DownloadManager ):
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Union[str, Any] ):
import pyspark
def get_arrow_batch_size(lowercase_ : Any ):
for batch in it:
yield pa.RecordBatch.from_pydict({"""batch_bytes""": [batch.nbytes]} )
lowercase_ : Union[str, Any] = self.df.count()
lowercase_ : Union[str, Any] = df_num_rows if df_num_rows <= 100 else 100
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
lowercase_ : Any = (
self.df.limit(lowercase_ )
.repartition(1 )
.mapInArrow(lowercase_ , """batch_bytes: long""" )
.agg(pyspark.sql.functions.sum("""batch_bytes""" ).alias("""sample_bytes""" ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
lowercase_ : List[Any] = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
lowercase_ : Any = min(lowercase_ , int(approx_total_size / max_shard_size ) )
lowercase_ : Any = self.df.repartition(lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Any , lowercase_ : str , lowercase_ : str , lowercase_ : int , ):
import pyspark
lowercase_ : Any = ParquetWriter if file_format == """parquet""" else ArrowWriter
lowercase_ : Dict = os.path.join(self._working_dir , os.path.basename(lowercase_ ) ) if self._working_dir else fpath
lowercase_ : Optional[Any] = file_format == """parquet"""
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
lowercase_ : Tuple = self.config.features
lowercase_ : Any = self._writer_batch_size
lowercase_ : List[str] = self._fs.storage_options
def write_arrow(lowercase_ : str ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
lowercase_ : List[str] = pyspark.TaskContext().taskAttemptId()
lowercase_ : Dict = next(lowercase_ , lowercase_ )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )
lowercase_ : int = 0
lowercase_ : List[Any] = writer_class(
features=lowercase_ , path=working_fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , writer_batch_size=lowercase_ , storage_options=lowercase_ , embed_local_files=lowercase_ , )
lowercase_ : Optional[Any] = pa.Table.from_batches([first_batch] )
writer.write_table(lowercase_ )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
lowercase_ , lowercase_ : Dict = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )
shard_id += 1
lowercase_ : Any = writer_class(
features=writer._features , path=working_fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , writer_batch_size=lowercase_ , storage_options=lowercase_ , embed_local_files=lowercase_ , )
lowercase_ : List[str] = pa.Table.from_batches([batch] )
writer.write_table(lowercase_ )
if writer._num_bytes > 0:
lowercase_ , lowercase_ : str = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(lowercase_ ) ):
lowercase_ : Optional[Any] = os.path.join(os.path.dirname(lowercase_ ) , os.path.basename(lowercase_ ) )
shutil.move(lowercase_ , lowercase_ )
lowercase_ : Union[str, Any] = (
self.df.mapInArrow(lowercase_ , """task_id: long, num_examples: long, num_bytes: long""" )
.groupBy("""task_id""" )
.agg(
pyspark.sql.functions.sum("""num_examples""" ).alias("""total_num_examples""" ) , pyspark.sql.functions.sum("""num_bytes""" ).alias("""total_num_bytes""" ) , pyspark.sql.functions.count("""num_bytes""" ).alias("""num_shards""" ) , pyspark.sql.functions.collect_list("""num_examples""" ).alias("""shard_lengths""" ) , )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : "datasets.SplitGenerator" , lowercase_ : str = "arrow" , lowercase_ : Optional[Union[str, int]] = None , lowercase_ : Optional[int] = None , **lowercase_ : List[str] , ):
self._validate_cache_dir()
lowercase_ : int = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(lowercase_ )
lowercase_ : Tuple = not is_remote_filesystem(self._fs )
lowercase_ : int = os.path.join if is_local else posixpath.join
lowercase_ : Dict = """-TTTTT-SSSSS-of-NNNNN"""
lowercase_ : Dict = f'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}'''
lowercase_ : Optional[int] = path_join(self._output_dir , lowercase_ )
lowercase_ : Any = 0
lowercase_ : Tuple = 0
lowercase_ : int = 0
lowercase_ : Dict = []
lowercase_ : Union[str, Any] = []
for task_id, content in self._prepare_split_single(lowercase_ , lowercase_ , lowercase_ ):
            lowercase_ , lowercase_ , lowercase_ , lowercase_ : Union[str, Any] = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(lowercase_ )
lowercase_ : List[str] = total_num_examples
lowercase_ : int = total_num_bytes
# should rename everything at the end
logger.debug(f'''Renaming {total_shards} shards.''' )
if total_shards > 1:
lowercase_ : Tuple = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
lowercase_ : Dict = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
lowercase_ : int , lowercase_ : int , lowercase_ : int , ):
rename(
lowercase_ , fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , fpath.replace("""TTTTT-SSSSS""" , f'''{global_shard_id:05d}''' ).replace("""NNNNN""" , f'''{total_shards:05d}''' ) , )
lowercase_ : Union[str, Any] = []
lowercase_ : Tuple = 0
for i in range(len(lowercase_ ) ):
lowercase_ , lowercase_ : List[Any] = task_id_and_num_shards[i]
for shard_id in range(lowercase_ ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(lowercase_ , len(lowercase_ ) ).map(lambda lowercase_ : _rename_shard(*lowercase_ ) ).collect()
else:
# don't use any pattern
lowercase_ : List[str] = 0
lowercase_ : Optional[Any] = task_id_and_num_shards[0][0]
self._rename(
fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , fpath.replace(lowercase_ , """""" ) , )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : "datasets.SplitGenerator" , ):
return SparkExamplesIterable(self.df )
'''simple docstring'''
import math
import random
def lowerCamelCase ( UpperCAmelCase__ : float , UpperCAmelCase__ : bool = False ) -> float:
if deriv:
return value * (1 - value)
return 1 / (1 + math.exp(-value ))
# Initial Value
_lowercase : List[Any] = 0.0_2
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> float:
lowercase_ : List[Any] = float(2 * (random.randint(1 , 100 )) - 1 )
for _ in range(UpperCAmelCase__ ):
# Forward propagation
lowercase_ : int = sigmoid_function(INITIAL_VALUE * weight )
# How much did we miss?
lowercase_ : List[Any] = (expected / 100) - layer_a
# Error delta
lowercase_ : Optional[int] = layer_1_error * sigmoid_function(UpperCAmelCase__ , UpperCAmelCase__ )
# Update weight
weight += INITIAL_VALUE * layer_1_delta
return layer_a * 100
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowercase : int = int(input("Expected value: "))
_lowercase : Any = int(input("Number of propagations: "))
print(forward_propagation(expected, number_propagations))
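# The deriv branch of sigmoid_function above returns value * (1 - value): it expects a
# value that has already been passed through the sigmoid, relying on the identity
# sigma'(x) = sigma(x) * (1 - sigma(x)). A quick standalone numerical check of that
# identity against a central difference (illustrative only):
import math
def _sigmoid(x):
    return 1 / (1 + math.exp(-x))
for _x in (-2.0, 0.0, 1.5):
    _s = _sigmoid(_x)
    _numeric = (_sigmoid(_x + 1e-6) - _sigmoid(_x - 1e-6)) / 2e-6  # central difference
    assert abs(_s * (1 - _s) - _numeric) < 1e-6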
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_lowercase : Dict = {
"configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Dict = ["BloomTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Dict = [
"BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
"BloomForCausalLM",
"BloomModel",
"BloomPreTrainedModel",
"BloomForSequenceClassification",
"BloomForTokenClassification",
"BloomForQuestionAnswering",
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
_lowercase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
_lowercase : Tuple = "scheduler_config.json"
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = 1
UpperCamelCase__ = 2
UpperCamelCase__ = 3
UpperCamelCase__ = 4
UpperCamelCase__ = 5
UpperCamelCase__ = 6
UpperCamelCase__ = 7
UpperCamelCase__ = 8
UpperCamelCase__ = 9
UpperCamelCase__ = 10
UpperCamelCase__ = 11
UpperCamelCase__ = 12
UpperCamelCase__ = 13
UpperCamelCase__ = 14
@dataclass
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = 42
class __magic_name__ :
UpperCamelCase__ = SCHEDULER_CONFIG_NAME
UpperCamelCase__ = []
UpperCamelCase__ = True
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Optional[Any] , lowercase_ : Dict[str, Any] = None , lowercase_ : Optional[str] = None , lowercase_ : Optional[int]=False , **lowercase_ : int , ):
lowercase_ , lowercase_ , lowercase_ : Tuple = cls.load_config(
pretrained_model_name_or_path=lowercase_ , subfolder=lowercase_ , return_unused_kwargs=lowercase_ , return_commit_hash=lowercase_ , **lowercase_ , )
return cls.from_config(lowercase_ , return_unused_kwargs=lowercase_ , **lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Union[str, os.PathLike] , lowercase_ : bool = False , **lowercase_ : str ):
self.save_config(save_directory=lowercase_ , push_to_hub=lowercase_ , **lowercase_ )
@property
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
return self._get_compatibles()
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : int ):
lowercase_ : str = list(set([cls.__name__] + cls._compatibles ) )
lowercase_ : Optional[Any] = importlib.import_module(__name__.split(""".""" )[0] )
lowercase_ : Dict = [
getattr(lowercase_ , lowercase_ ) for c in compatible_classes_str if hasattr(lowercase_ , lowercase_ )
]
return compatible_classes
'''simple docstring'''
_lowercase : int = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def lowerCamelCase ( ) -> None:
lowercase_ : List[Any] = input("""Enter message: """ )
lowercase_ : str = input("""Enter key [alphanumeric]: """ )
lowercase_ : List[Any] = input("""Encrypt/Decrypt [e/d]: """ )
if mode.lower().startswith("""e""" ):
lowercase_ : List[str] = """encrypt"""
lowercase_ : Optional[int] = encrypt_message(UpperCAmelCase__ , UpperCAmelCase__ )
elif mode.lower().startswith("""d""" ):
lowercase_ : Any = """decrypt"""
lowercase_ : Optional[Any] = decrypt_message(UpperCAmelCase__ , UpperCAmelCase__ )
print(F'''\n{mode.title()}ed message:''' )
print(UpperCAmelCase__ )
def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : str ) -> str:
return translate_message(UpperCAmelCase__ , UpperCAmelCase__ , """encrypt""" )
def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : str ) -> str:
return translate_message(UpperCAmelCase__ , UpperCAmelCase__ , """decrypt""" )
def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : str , UpperCAmelCase__ : str ) -> str:
lowercase_ : Union[str, Any] = []
lowercase_ : List[Any] = 0
lowercase_ : str = key.upper()
for symbol in message:
lowercase_ : Tuple = LETTERS.find(symbol.upper() )
if num != -1:
if mode == "encrypt":
num += LETTERS.find(key[key_index] )
elif mode == "decrypt":
num -= LETTERS.find(key[key_index] )
num %= len(UpperCAmelCase__ )
if symbol.isupper():
translated.append(LETTERS[num] )
elif symbol.islower():
translated.append(LETTERS[num].lower() )
key_index += 1
if key_index == len(UpperCAmelCase__ ):
lowercase_ : Any = 0
else:
translated.append(UpperCAmelCase__ )
return "".join(UpperCAmelCase__ )
if __name__ == "__main__":
main()
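# The functions above implement a classic Vigenere cipher over A-Z: each letter is
# shifted by the corresponding key letter (added when encrypting, subtracted when
# decrypting), and the key index only advances on letters. A tiny standalone sketch of
# the same shifting rule on a textbook example (illustrative only, not the module's
# own functions):
_ALPHABET = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def _vigenere_sketch(text, key, sign):
    out, key_index = [], 0
    for char in text:
        if char in _ALPHABET:
            shift = sign * _ALPHABET.index(key[key_index % len(key)])
            out.append(_ALPHABET[(_ALPHABET.index(char) + shift) % 26])
            key_index += 1
        else:
            out.append(char)
    return "".join(out)
assert _vigenere_sketch("HELLO", "KEY", +1) == "RIJVS"
assert _vigenere_sketch("RIJVS", "KEY", -1) == "HELLO"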
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __magic_name__ ( metaclass=_UpperCAmelCase):
UpperCamelCase__ = ['''flax''', '''transformers''']
def __init__( self : Optional[Any] , *lowercase_ : int , **lowercase_ : int ):
requires_backends(self , ["""flax""", """transformers"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[str] , *lowercase_ : Tuple , **lowercase_ : int ):
requires_backends(cls , ["""flax""", """transformers"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Optional[int] , *lowercase_ : str , **lowercase_ : str ):
requires_backends(cls , ["""flax""", """transformers"""] )
class __magic_name__ ( metaclass=_UpperCAmelCase):
UpperCamelCase__ = ['''flax''', '''transformers''']
def __init__( self : Optional[Any] , *lowercase_ : Dict , **lowercase_ : Any ):
requires_backends(self , ["""flax""", """transformers"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Optional[int] , *lowercase_ : List[Any] , **lowercase_ : Tuple ):
requires_backends(cls , ["""flax""", """transformers"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Any , *lowercase_ : Union[str, Any] , **lowercase_ : str ):
requires_backends(cls , ["""flax""", """transformers"""] )
class __magic_name__ ( metaclass=_UpperCAmelCase):
UpperCamelCase__ = ['''flax''', '''transformers''']
def __init__( self : str , *lowercase_ : int , **lowercase_ : Union[str, Any] ):
requires_backends(self , ["""flax""", """transformers"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : int , *lowercase_ : Optional[int] , **lowercase_ : Any ):
requires_backends(cls , ["""flax""", """transformers"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Optional[Any] , *lowercase_ : int , **lowercase_ : str ):
requires_backends(cls , ["""flax""", """transformers"""] )
class __magic_name__ ( metaclass=_UpperCAmelCase):
UpperCamelCase__ = ['''flax''', '''transformers''']
def __init__( self : str , *lowercase_ : int , **lowercase_ : Optional[Any] ):
requires_backends(self , ["""flax""", """transformers"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[Any] , *lowercase_ : Tuple , **lowercase_ : List[Any] ):
requires_backends(cls , ["""flax""", """transformers"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[str] , *lowercase_ : Dict , **lowercase_ : Optional[Any] ):
requires_backends(cls , ["""flax""", """transformers"""] )
'''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class __magic_name__ ( unittest.TestCase):
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
lowercase_ : List[Any] = """ylacombe/bark-small"""
lowercase_ : List[str] = tempfile.mkdtemp()
lowercase_ : Tuple = """en_speaker_1"""
lowercase_ : Union[str, Any] = """This is a test string"""
lowercase_ : int = """speaker_embeddings_path.json"""
lowercase_ : Any = """speaker_embeddings"""
def SCREAMING_SNAKE_CASE_ ( self : Tuple , **lowercase_ : Optional[int] ):
return AutoTokenizer.from_pretrained(self.checkpoint , **lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : str ):
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
lowercase_ : Any = self.get_tokenizer()
lowercase_ : Optional[Any] = BarkProcessor(tokenizer=lowercase_ )
processor.save_pretrained(self.tmpdirname )
lowercase_ : Union[str, Any] = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
lowercase_ : Any = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
lowercase_ : List[str] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
lowercase_ : Optional[Any] = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
lowercase_ : Optional[int] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
lowercase_ : Optional[int] = 35
lowercase_ : int = 2
lowercase_ : Union[str, Any] = 8
lowercase_ : Union[str, Any] = {
"""semantic_prompt""": np.ones(lowercase_ ),
"""coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ),
"""fine_prompt""": np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
lowercase_ : str = processor(text=self.input_string , voice_preset=lowercase_ )
lowercase_ : Dict = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowercase_ , np.array([] ) ).tolist() )
# test loading voice preset from npz file
lowercase_ : Any = os.path.join(self.tmpdirname , """file.npz""" )
np.savez(lowercase_ , **lowercase_ )
lowercase_ : Optional[Any] = processor(text=self.input_string , voice_preset=lowercase_ )
lowercase_ : List[Any] = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowercase_ , np.array([] ) ).tolist() )
# test loading voice preset from the hub
lowercase_ : Union[str, Any] = processor(text=self.input_string , voice_preset=self.voice_preset )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
lowercase_ : List[str] = self.get_tokenizer()
lowercase_ : int = BarkProcessor(tokenizer=lowercase_ )
lowercase_ : Any = processor(text=self.input_string )
lowercase_ : List[str] = tokenizer(
self.input_string , padding="""max_length""" , max_length=256 , add_special_tokens=lowercase_ , return_attention_mask=lowercase_ , return_token_type_ids=lowercase_ , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
'''simple docstring'''
from __future__ import annotations
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> tuple[int, int]:
if b == 0:
return (1, 0)
((lowercase_) , (lowercase_)) : str = extended_euclid(UpperCAmelCase__ , a % b )
lowercase_ : str = a // b
return (y, x - k * y)
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> int:
((lowercase_) , (lowercase_)) : Union[str, Any] = extended_euclid(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase_ : List[str] = na * na
lowercase_ : Any = ra * x * na + ra * y * na
return (n % m + m) % m
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> int:
((lowercase_) , (lowercase_)) : Any = extended_euclid(UpperCAmelCase__ , UpperCAmelCase__ )
if b < 0:
lowercase_ : Optional[int] = (b % n + n) % n
return b
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> int:
lowercase_ , lowercase_ : Optional[Any] = invert_modulo(UpperCAmelCase__ , UpperCAmelCase__ ), invert_modulo(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase_ : int = na * na
lowercase_ : Union[str, Any] = ra * x * na + ra * y * na
return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name="chinese_remainder_theorem", verbose=True)
testmod(name="chinese_remainder_theorem2", verbose=True)
testmod(name="invert_modulo", verbose=True)
testmod(name="extended_euclid", verbose=True)
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=_UpperCAmelCase)
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = field(default='''image-classification''', metadata={'''include_in_asdict_even_if_is_default''': True})
UpperCamelCase__ = Features({'''image''': Image()})
UpperCamelCase__ = Features({'''labels''': ClassLabel})
UpperCamelCase__ = "image"
UpperCamelCase__ = "labels"
def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : str ):
if self.label_column not in features:
raise ValueError(f'''Column {self.label_column} is not present in features.''' )
if not isinstance(features[self.label_column] , lowercase_ ):
raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' )
lowercase_ : List[str] = copy.deepcopy(self )
lowercase_ : List[str] = self.label_schema.copy()
lowercase_ : List[Any] = features[self.label_column]
lowercase_ : Optional[Any] = label_schema
return task_template
@property
def SCREAMING_SNAKE_CASE_ ( self : int ):
return {
self.image_column: "image",
self.label_column: "labels",
}
'''simple docstring'''
from decimal import Decimal, getcontext
from math import ceil, factorial
def lowerCamelCase ( UpperCAmelCase__ : int ) -> str:
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
raise TypeError("""Undefined for non-integers""" )
elif precision < 1:
raise ValueError("""Undefined for non-natural numbers""" )
lowercase_ : Optional[Any] = precision
lowercase_ : List[Any] = ceil(precision / 14 )
lowercase_ : Optional[int] = 426880 * Decimal(10005 ).sqrt()
lowercase_ : List[str] = 1
lowercase_ : List[Any] = 13591409
lowercase_ : Union[str, Any] = Decimal(UpperCAmelCase__ )
for k in range(1 , UpperCAmelCase__ ):
lowercase_ : str = factorial(6 * k ) // (factorial(3 * k ) * factorial(UpperCAmelCase__ ) ** 3)
linear_term += 545140134
exponential_term *= -262537412640768000
partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
return str(constant_term / partial_sum )[:-1]
if __name__ == "__main__":
_lowercase : int = 50
print(f"""The first {n} digits of pi is: {pi(n)}""")
'''simple docstring'''
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def lowerCamelCase ( UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[Any] ) -> List[Any]:
lowercase_ : str = 1.5
lowercase_ : List[Any] = int(factor * num_class_images )
lowercase_ : int = ClipClient(
url="""https://knn.laion.ai/knn-service""" , indice_name="""laion_400m""" , num_images=UpperCAmelCase__ , aesthetic_weight=0.1 )
os.makedirs(F'''{class_data_dir}/images''' , exist_ok=UpperCAmelCase__ )
if len(list(Path(F'''{class_data_dir}/images''' ).iterdir() ) ) >= num_class_images:
return
while True:
lowercase_ : List[str] = client.query(text=UpperCAmelCase__ )
if len(UpperCAmelCase__ ) >= factor * num_class_images or num_images > 1e4:
break
else:
lowercase_ : List[str] = int(factor * num_images )
lowercase_ : List[str] = ClipClient(
url="""https://knn.laion.ai/knn-service""" , indice_name="""laion_400m""" , num_images=UpperCAmelCase__ , aesthetic_weight=0.1 , )
lowercase_ : List[str] = 0
lowercase_ : Dict = 0
lowercase_ : Tuple = tqdm(desc="""downloading real regularization images""" , total=UpperCAmelCase__ )
with open(F'''{class_data_dir}/caption.txt''' , """w""" ) as fa, open(F'''{class_data_dir}/urls.txt''' , """w""" ) as fa, open(
F'''{class_data_dir}/images.txt''' , """w""" ) as fa:
while total < num_class_images:
lowercase_ : str = class_images[count]
count += 1
try:
lowercase_ : Union[str, Any] = requests.get(images["""url"""] )
if img.status_code == 200:
lowercase_ : List[str] = Image.open(BytesIO(img.content ) )
with open(F'''{class_data_dir}/images/{total}.jpg''' , """wb""" ) as f:
f.write(img.content )
fa.write(images["""caption"""] + """\n""" )
fa.write(images["""url"""] + """\n""" )
fa.write(F'''{class_data_dir}/images/{total}.jpg''' + """\n""" )
total += 1
pbar.update(1 )
else:
continue
except Exception:
continue
return
def lowerCamelCase ( ) -> Optional[Any]:
lowercase_ : Any = argparse.ArgumentParser("""""" , add_help=UpperCAmelCase__ )
parser.add_argument("""--class_prompt""" , help="""text prompt to retrieve images""" , required=UpperCAmelCase__ , type=UpperCAmelCase__ )
parser.add_argument("""--class_data_dir""" , help="""path to save images""" , required=UpperCAmelCase__ , type=UpperCAmelCase__ )
parser.add_argument("""--num_class_images""" , help="""number of images to download""" , default=200 , type=UpperCAmelCase__ )
return parser.parse_args()
if __name__ == "__main__":
_lowercase : Dict = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
'''simple docstring'''
class __magic_name__ :
def __init__( self : List[str] ):
lowercase_ : Optional[Any] = {}
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
print(self.vertex )
for i in self.vertex:
print(lowercase_ , """ -> """ , """ -> """.join([str(lowercase_ ) for j in self.vertex[i]] ) )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : int , lowercase_ : int ):
# check if vertex is already present,
if from_vertex in self.vertex:
self.vertex[from_vertex].append(lowercase_ )
else:
# else make a new vertex
lowercase_ : Union[str, Any] = [to_vertex]
def SCREAMING_SNAKE_CASE_ ( self : Any ):
# visited array for storing already visited nodes
lowercase_ : str = [False] * len(self.vertex )
# call the recursive helper function
for i in range(len(self.vertex ) ):
if not visited[i]:
self.dfs_recursive(lowercase_ , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : int , lowercase_ : list ):
# mark start vertex as visited
lowercase_ : Dict = True
print(lowercase_ , end=""" """ )
# Recur for all the vertices that are adjacent to this node
for i in self.vertex:
if not visited[i]:
self.dfs_recursive(lowercase_ , lowercase_ )
if __name__ == "__main__":
_lowercase : Optional[int] = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print("DFS:")
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
'''simple docstring'''
from __future__ import annotations
def lowerCamelCase ( UpperCAmelCase__ : list , UpperCAmelCase__ : int | None = None , UpperCAmelCase__ : int | None = None ) -> None:
if start is None:
lowercase_ : Any = 0
if end is None:
lowercase_ : List[Any] = len(UpperCAmelCase__ ) - 1
if start >= end:
return
lowercase_ : Optional[int] = (start + end) // 2
slowsort(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
slowsort(UpperCAmelCase__ , mid + 1 , UpperCAmelCase__ )
if sequence[end] < sequence[mid]:
lowercase_ , lowercase_ : Dict = sequence[mid], sequence[end]
slowsort(UpperCAmelCase__ , UpperCAmelCase__ , end - 1 )
if __name__ == "__main__":
from doctest import testmod
testmod()
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_lowercase : Optional[int] = {
"configuration_mask2former": [
"MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Mask2FormerConfig",
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Any = ["Mask2FormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Union[str, Any] = [
"MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"Mask2FormerForUniversalSegmentation",
"Mask2FormerModel",
"Mask2FormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_maskaformer import MaskaFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskaformer import (
MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskaFormerForUniversalSegmentation,
MaskaFormerModel,
MaskaFormerPreTrainedModel,
)
else:
import sys
_lowercase : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 30
|
'''simple docstring'''
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
_lowercase : Optional[Any] = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
_lowercase : Dict = parser.parse_args()
_lowercase : Dict = "cpu"
_lowercase : str = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"
_lowercase : Any = "path-to-your-trained-model"
_lowercase : str = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
_lowercase : Optional[int] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
_lowercase : Any = pipe.to(device)
# to channels last
_lowercase : Union[str, Any] = pipe.unet.to(memory_format=torch.channels_last)
_lowercase : List[Any] = pipe.vae.to(memory_format=torch.channels_last)
_lowercase : Union[str, Any] = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
_lowercase : Tuple = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
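# dummy UNet inputs (latents, timestep, text-encoder hidden states) passed as sample_input so ipex.optimize can trace the UNet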
_lowercase : int = torch.randn(2, 4, 64, 64)
_lowercase : int = torch.rand(1) * 999
_lowercase : Union[str, Any] = torch.randn(2, 77, 768)
_lowercase : Optional[Any] = (sample, timestep, encoder_hidden_status)
try:
_lowercase : int = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example)
except Exception:
_lowercase : Optional[int] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True)
_lowercase : List[Any] = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True)
_lowercase : str = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True)
if pipe.requires_safety_checker:
_lowercase : int = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True)
# compute
_lowercase : int = 666
_lowercase : Any = torch.Generator(device).manual_seed(seed)
_lowercase : int = {"generator": generator}
if args.steps is not None:
_lowercase : Optional[int] = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa):
_lowercase : List[Any] = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save("generated.png")
| 30
| 1
|
'''simple docstring'''
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
_lowercase : Dict = logging.getLogger(__name__)
_lowercase : int = tf.data.AUTOTUNE
def lowerCamelCase ( ) -> List[str]:
lowercase_ : Union[str, Any] = argparse.ArgumentParser(description="""Train a masked language model on TPU.""" )
parser.add_argument(
"""--pretrained_model_config""" , type=UpperCAmelCase__ , default="""roberta-base""" , help="""The model config to use. Note that we don't copy the model's weights, only the config!""" , )
parser.add_argument(
"""--tokenizer""" , type=UpperCAmelCase__ , default="""unigram-tokenizer-wikitext""" , help="""The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.""" , )
parser.add_argument(
"""--per_replica_batch_size""" , type=UpperCAmelCase__ , default=8 , help="""Batch size per TPU core.""" , )
parser.add_argument(
"""--no_tpu""" , action="""store_true""" , help="""If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.""" , )
parser.add_argument(
"""--tpu_name""" , type=UpperCAmelCase__ , help="""Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.""" , default="""local""" , )
parser.add_argument(
"""--tpu_zone""" , type=UpperCAmelCase__ , help="""Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.""" , )
parser.add_argument(
"""--gcp_project""" , type=UpperCAmelCase__ , help="""Google cloud project name. Only used for non-Colab TPU nodes.""" )
parser.add_argument(
"""--bfloat16""" , action="""store_true""" , help="""Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.""" , )
parser.add_argument(
"""--train_dataset""" , type=UpperCAmelCase__ , help="""Path to training dataset to load. If the path begins with `gs://`"""
""" then the dataset will be loaded from a Google Cloud Storage bucket.""" , )
parser.add_argument(
"""--shuffle_buffer_size""" , type=UpperCAmelCase__ , default=2**18 , help="""Size of the shuffle buffer (in samples)""" , )
parser.add_argument(
"""--eval_dataset""" , type=UpperCAmelCase__ , help="""Path to evaluation dataset to load. If the path begins with `gs://`"""
""" then the dataset will be loaded from a Google Cloud Storage bucket.""" , )
parser.add_argument(
"""--num_epochs""" , type=UpperCAmelCase__ , default=1 , help="""Number of epochs to train for.""" , )
parser.add_argument(
"""--learning_rate""" , type=UpperCAmelCase__ , default=1e-4 , help="""Learning rate to use for training.""" , )
parser.add_argument(
"""--weight_decay_rate""" , type=UpperCAmelCase__ , default=1e-3 , help="""Weight decay rate to use for training.""" , )
parser.add_argument(
"""--max_length""" , type=UpperCAmelCase__ , default=512 , help="""Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py""" , )
parser.add_argument(
"""--mlm_probability""" , type=UpperCAmelCase__ , default=0.15 , help="""Fraction of tokens to mask during training.""" , )
parser.add_argument("""--output_dir""" , type=UpperCAmelCase__ , required=UpperCAmelCase__ , help="""Path to save model checkpoints to.""" )
parser.add_argument("""--hub_model_id""" , type=UpperCAmelCase__ , help="""Model ID to upload to on the Hugging Face Hub.""" )
lowercase_ : List[str] = parser.parse_args()
return args
def lowerCamelCase ( UpperCAmelCase__ : Tuple ) -> str:
try:
if args.tpu_name:
lowercase_ : Tuple = tf.distribute.cluster_resolver.TPUClusterResolver(
args.tpu_name , zone=args.tpu_zone , project=args.gcp_project )
else:
lowercase_ : Union[str, Any] = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
raise RuntimeError(
"""Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or """
"""--gcp_project. When running on a TPU VM, use --tpu_name local.""" )
tf.config.experimental_connect_to_cluster(UpperCAmelCase__ )
tf.tpu.experimental.initialize_tpu_system(UpperCAmelCase__ )
return tpu
def lowerCamelCase ( UpperCAmelCase__ : int ) -> Optional[int]:
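    # The shard filenames are expected to contain "-<shard>-<num_samples>.tfrecord",
    # so the total number of samples can be read directly from the names.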
lowercase_ : Dict = 0
for file in file_list:
lowercase_ : List[Any] = file.split("""/""" )[-1]
lowercase_ : Union[str, Any] = re.search(R"""-\d+-(\d+)\.tfrecord""" , UpperCAmelCase__ ).group(1 )
lowercase_ : Optional[int] = int(UpperCAmelCase__ )
num_samples += sample_count
return num_samples
def lowerCamelCase ( UpperCAmelCase__ : Tuple , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[int]=None ) -> Union[str, Any]:
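    # Build the tf.data pipeline: read the TFRecord shards, decode and mask each
    # example, then shuffle, batch and prefetch. Shuffling happens both at the
    # file level and at the sample level (via the shuffle buffer).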
lowercase_ : Optional[Any] = count_samples(UpperCAmelCase__ )
lowercase_ : List[str] = tf.data.Dataset.from_tensor_slices(UpperCAmelCase__ )
if shuffle:
lowercase_ : int = dataset.shuffle(len(UpperCAmelCase__ ) )
lowercase_ : Any = tf.data.TFRecordDataset(UpperCAmelCase__ , num_parallel_reads=UpperCAmelCase__ )
# TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
lowercase_ : Tuple = dataset.apply(tf.data.experimental.assert_cardinality(UpperCAmelCase__ ) )
lowercase_ : str = dataset.map(UpperCAmelCase__ , num_parallel_calls=UpperCAmelCase__ )
if shuffle:
assert shuffle_buffer_size is not None
lowercase_ : Optional[int] = dataset.shuffle(args.shuffle_buffer_size )
lowercase_ : Union[str, Any] = dataset.batch(UpperCAmelCase__ , drop_remainder=UpperCAmelCase__ )
lowercase_ : str = dataset.map(UpperCAmelCase__ , num_parallel_calls=UpperCAmelCase__ )
lowercase_ : Dict = dataset.prefetch(UpperCAmelCase__ )
return dataset
def lowerCamelCase ( UpperCAmelCase__ : List[str] ) -> Optional[int]:
if not args.no_tpu:
lowercase_ : Optional[Any] = initialize_tpu(UpperCAmelCase__ )
lowercase_ : Optional[int] = tf.distribute.TPUStrategy(UpperCAmelCase__ )
else:
lowercase_ : Optional[int] = tf.distribute.OneDeviceStrategy(device="""/gpu:0""" )
if args.bfloataa:
tf.keras.mixed_precision.set_global_policy("""mixed_bfloat16""" )
lowercase_ : Union[str, Any] = AutoTokenizer.from_pretrained(args.tokenizer )
lowercase_ : int = AutoConfig.from_pretrained(args.pretrained_model_config )
lowercase_ : int = tokenizer.vocab_size
lowercase_ : Any = tf.io.gfile.glob(os.path.join(args.train_dataset , """*.tfrecord""" ) )
if not training_records:
raise ValueError(F'''No .tfrecord files found in {args.train_dataset}.''' )
lowercase_ : str = tf.io.gfile.glob(os.path.join(args.eval_dataset , """*.tfrecord""" ) )
if not eval_records:
raise ValueError(F'''No .tfrecord files found in {args.eval_dataset}.''' )
lowercase_ : Tuple = count_samples(UpperCAmelCase__ )
lowercase_ : Optional[int] = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
lowercase_ : Dict = steps_per_epoch * args.num_epochs
with strategy.scope():
lowercase_ : Any = TFAutoModelForMaskedLM.from_config(UpperCAmelCase__ )
model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built
lowercase_ , lowercase_ : Any = create_optimizer(
num_train_steps=UpperCAmelCase__ , num_warmup_steps=total_train_steps // 20 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , )
# Transformers models compute the right loss for their task by default when labels are passed, and will
# use this for training unless you specify your own loss function in compile().
model.compile(optimizer=UpperCAmelCase__ , metrics=["""accuracy"""] )
def decode_fn(UpperCAmelCase__ : Union[str, Any] ):
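        # every serialized example carries fixed-length "input_ids" and
        # "attention_mask" features of length args.max_length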
lowercase_ : List[str] = {
"""input_ids""": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ),
"""attention_mask""": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ),
}
return tf.io.parse_single_example(UpperCAmelCase__ , UpperCAmelCase__ )
# Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
# use their methods in our data pipeline.
lowercase_ : int = DataCollatorForLanguageModeling(
tokenizer=UpperCAmelCase__ , mlm_probability=args.mlm_probability , mlm=UpperCAmelCase__ , return_tensors="""tf""" )
def mask_with_collator(UpperCAmelCase__ : List[str] ):
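        # build a special-tokens mask by hand (padding, CLS and SEP positions),
        # then let the collator's tf_mask_tokens apply the MLM masking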
# TF really needs an isin() function
lowercase_ : Dict = (
~tf.cast(batch["""attention_mask"""] , tf.bool )
| (batch["""input_ids"""] == tokenizer.cls_token_id)
| (batch["""input_ids"""] == tokenizer.sep_token_id)
)
lowercase_ , lowercase_ : Tuple = data_collator.tf_mask_tokens(
batch["""input_ids"""] , vocab_size=len(UpperCAmelCase__ ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=UpperCAmelCase__ , )
return batch
lowercase_ : Any = args.per_replica_batch_size * strategy.num_replicas_in_sync
lowercase_ : Optional[Any] = prepare_dataset(
UpperCAmelCase__ , decode_fn=UpperCAmelCase__ , mask_fn=UpperCAmelCase__ , batch_size=UpperCAmelCase__ , shuffle=UpperCAmelCase__ , shuffle_buffer_size=args.shuffle_buffer_size , )
lowercase_ : Optional[Any] = prepare_dataset(
UpperCAmelCase__ , decode_fn=UpperCAmelCase__ , mask_fn=UpperCAmelCase__ , batch_size=UpperCAmelCase__ , shuffle=UpperCAmelCase__ , )
lowercase_ : Dict = []
if args.hub_model_id:
callbacks.append(
PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=UpperCAmelCase__ ) )
model.fit(
UpperCAmelCase__ , validation_data=UpperCAmelCase__ , epochs=args.num_epochs , callbacks=UpperCAmelCase__ , )
model.save_pretrained(args.output_dir )
if __name__ == "__main__":
_lowercase : List[Any] = parse_args()
main(args)
| 30
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_lowercase : Optional[Any] = {
"configuration_swiftformer": [
"SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SwiftFormerConfig",
"SwiftFormerOnnxConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Any = [
"SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"SwiftFormerForImageClassification",
"SwiftFormerModel",
"SwiftFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
_lowercase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 30
| 1
|
'''simple docstring'''
from __future__ import annotations
def lowerCamelCase ( UpperCAmelCase__ : list[int] ) -> bool:
return len(set(UpperCAmelCase__ ) ) == len(UpperCAmelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 30
|
'''simple docstring'''
import unittest
import numpy as np
def lowerCamelCase ( UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : np.ndarray | None = None , ) -> np.ndarray:
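    # Schur complement of block A in the symmetric block matrix [[A, B], [B.T, C]]:
    # S = C - B.T @ A^-1 @ B. A precomputed (pseudo-)inverse of A can be passed as
    # the optional fourth argument; otherwise A is inverted with np.linalg.inv.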
lowercase_ : List[Any] = np.shape(UpperCAmelCase__ )
lowercase_ : Dict = np.shape(UpperCAmelCase__ )
lowercase_ : int = np.shape(UpperCAmelCase__ )
if shape_a[0] != shape_b[0]:
lowercase_ : Optional[int] = (
"""Expected the same number of rows for A and B. """
F'''Instead found A of size {shape_a} and B of size {shape_b}'''
)
raise ValueError(UpperCAmelCase__ )
if shape_b[1] != shape_c[1]:
lowercase_ : Optional[Any] = (
"""Expected the same number of columns for B and C. """
F'''Instead found B of size {shape_b} and C of size {shape_c}'''
)
raise ValueError(UpperCAmelCase__ )
lowercase_ : Any = pseudo_inv
if a_inv is None:
try:
lowercase_ : List[str] = np.linalg.inv(UpperCAmelCase__ )
except np.linalg.LinAlgError:
raise ValueError(
"""Input matrix A is not invertible. Cannot compute Schur complement.""" )
return mat_c - mat_b.T @ a_inv @ mat_b
class __magic_name__ ( unittest.TestCase):
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
lowercase_ : Tuple = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
lowercase_ : int = np.array([[0, 3], [3, 0], [2, 3]] )
lowercase_ : Dict = np.array([[2, 1], [6, 3]] )
lowercase_ : Union[str, Any] = schur_complement(lowercase_ , lowercase_ , lowercase_ )
lowercase_ : List[Any] = np.block([[a, b], [b.T, c]] )
lowercase_ : Optional[int] = np.linalg.det(lowercase_ )
lowercase_ : int = np.linalg.det(lowercase_ )
lowercase_ : int = np.linalg.det(lowercase_ )
self.assertAlmostEqual(lowercase_ , det_a * det_s )
def SCREAMING_SNAKE_CASE_ ( self : str ):
lowercase_ : int = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
lowercase_ : Optional[Any] = np.array([[0, 3], [3, 0], [2, 3]] )
lowercase_ : Union[str, Any] = np.array([[2, 1], [6, 3]] )
with self.assertRaises(lowercase_ ):
schur_complement(lowercase_ , lowercase_ , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ : List[Any] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
lowercase_ : List[Any] = np.array([[0, 3], [3, 0], [2, 3]] )
lowercase_ : str = np.array([[2, 1, 3], [6, 3, 5]] )
with self.assertRaises(lowercase_ ):
schur_complement(lowercase_ , lowercase_ , lowercase_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
| 30
| 1
|
'''simple docstring'''
import math
import qiskit
def lowerCamelCase ( UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : int = 1 ) -> qiskit.result.counts.Counts:
if (
isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
or isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
or isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
):
raise TypeError("""inputs must be integers.""" )
if (input_a < 0) or (input_a < 0) or (carry_in < 0):
raise ValueError("""inputs must be positive.""" )
if (
(math.floor(UpperCAmelCase__ ) != input_a)
or (math.floor(UpperCAmelCase__ ) != input_a)
or (math.floor(UpperCAmelCase__ ) != carry_in)
):
raise ValueError("""inputs must be exact integers.""" )
if (input_a > 2) or (input_a > 2) or (carry_in > 2):
raise ValueError("""inputs must be less or equal to 2.""" )
# build registers
lowercase_ : Union[str, Any] = qiskit.QuantumRegister(4 , """qr""" )
lowercase_ : Optional[Any] = qiskit.ClassicalRegister(2 , """cr""" )
# list the entries
lowercase_ : Tuple = [input_a, input_a, carry_in]
lowercase_ : List[str] = qiskit.QuantumCircuit(UpperCAmelCase__ , UpperCAmelCase__ )
for i in range(0 , 3 ):
if entry[i] == 2:
quantum_circuit.h(UpperCAmelCase__ ) # for hadamard entries
elif entry[i] == 1:
quantum_circuit.x(UpperCAmelCase__ ) # for 1 entries
elif entry[i] == 0:
quantum_circuit.i(UpperCAmelCase__ ) # for 0 entries
# build the circuit
quantum_circuit.ccx(0 , 1 , 3 ) # ccx = toffoli gate
quantum_circuit.cx(0 , 1 )
quantum_circuit.ccx(1 , 2 , 3 )
quantum_circuit.cx(1 , 2 )
quantum_circuit.cx(0 , 1 )
    quantum_circuit.measure([2, 3] , UpperCAmelCase__ ) # measure the last two qubits: qubit 2 holds the sum, qubit 3 the carry-out
lowercase_ : List[Any] = qiskit.Aer.get_backend("""aer_simulator""" )
lowercase_ : Union[str, Any] = qiskit.execute(UpperCAmelCase__ , UpperCAmelCase__ , shots=1000 )
return job.result().get_counts(UpperCAmelCase__ )
if __name__ == "__main__":
print(f"""Total sum count for state is: {quantum_full_adder(1, 1, 1)}""")
| 30
|
'''simple docstring'''
_lowercase : int = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
def lowerCamelCase ( UpperCAmelCase__ : bytes ) -> bytes:
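    # Convert the input bytes to a bit string, pad it to a multiple of 6 bits,
    # then map every 6-bit group to a character of B64_CHARSET and append the
    # matching '=' padding.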
# Make sure the supplied data is a bytes-like object
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
lowercase_ : Union[str, Any] = F'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
raise TypeError(UpperCAmelCase__ )
lowercase_ : Dict = """""".join(bin(UpperCAmelCase__ )[2:].zfill(8 ) for byte in data )
lowercase_ : Union[str, Any] = len(UpperCAmelCase__ ) % 6 != 0
if padding_needed:
# The padding that will be added later
lowercase_ : List[Any] = b"""=""" * ((6 - len(UpperCAmelCase__ ) % 6) // 2)
        # Pad binary_stream with "0" bits so that its
        # length becomes a multiple of 6.
binary_stream += "0" * (6 - len(UpperCAmelCase__ ) % 6)
else:
lowercase_ : Union[str, Any] = b""""""
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(UpperCAmelCase__ ) , 6 ) ).encode()
+ padding
)
def lowerCamelCase ( UpperCAmelCase__ : str ) -> bytes:
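    # Reverse the encoding: validate the characters and '=' padding, map each
    # character back to its 6-bit value, then regroup the bit string into bytes.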
# Make sure encoded_data is either a string or a bytes-like object
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) and not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
lowercase_ : List[str] = (
"""argument should be a bytes-like object or ASCII string, """
F'''not \'{encoded_data.__class__.__name__}\''''
)
raise TypeError(UpperCAmelCase__ )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
try:
lowercase_ : Optional[int] = encoded_data.decode("""utf-8""" )
except UnicodeDecodeError:
raise ValueError("""base64 encoded data should only contain ASCII characters""" )
lowercase_ : Any = encoded_data.count("""=""" )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(UpperCAmelCase__ ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
lowercase_ : Optional[int] = encoded_data[:-padding]
lowercase_ : Any = """""".join(
bin(B64_CHARSET.index(UpperCAmelCase__ ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
lowercase_ : int = """""".join(
bin(B64_CHARSET.index(UpperCAmelCase__ ) )[2:].zfill(6 ) for char in encoded_data )
lowercase_ : Optional[int] = [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(UpperCAmelCase__ ) , 8 )
]
return bytes(UpperCAmelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 30
| 1
|
'''simple docstring'''
from ... import PretrainedConfig
_lowercase : Union[str, Any] = {
"sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json",
}
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
UpperCamelCase__ = '''nezha'''
def __init__( self : Dict , lowercase_ : Tuple=21128 , lowercase_ : Dict=768 , lowercase_ : Dict=12 , lowercase_ : Optional[int]=12 , lowercase_ : str=3072 , lowercase_ : Dict="gelu" , lowercase_ : List[str]=0.1 , lowercase_ : List[str]=0.1 , lowercase_ : Dict=512 , lowercase_ : str=64 , lowercase_ : Optional[int]=2 , lowercase_ : List[Any]=0.02 , lowercase_ : Optional[int]=1E-12 , lowercase_ : Tuple=0.1 , lowercase_ : List[str]=0 , lowercase_ : Optional[Any]=2 , lowercase_ : str=3 , lowercase_ : Optional[int]=True , **lowercase_ : Dict , ):
super().__init__(pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_ )
lowercase_ : Dict = vocab_size
lowercase_ : int = hidden_size
lowercase_ : List[str] = num_hidden_layers
lowercase_ : int = num_attention_heads
lowercase_ : List[Any] = hidden_act
lowercase_ : List[str] = intermediate_size
lowercase_ : Optional[int] = hidden_dropout_prob
lowercase_ : int = attention_probs_dropout_prob
lowercase_ : Union[str, Any] = max_position_embeddings
lowercase_ : Tuple = max_relative_position
lowercase_ : List[str] = type_vocab_size
lowercase_ : List[str] = initializer_range
lowercase_ : List[str] = layer_norm_eps
lowercase_ : Union[str, Any] = classifier_dropout
lowercase_ : List[Any] = use_cache
| 30
|
'''simple docstring'''
import argparse
_lowercase : Optional[int] = "docs/source/_static/js/custom.js"
def lowerCamelCase ( UpperCAmelCase__ : Tuple ) -> Dict:
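    # Rewrite the `stableVersion` constant and append the new version to the
    # `versionMapping` dictionary in the docs' custom.js file.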
with open(UpperCAmelCase__ , encoding="""utf-8""" , newline="""\n""" ) as f:
lowercase_ : Optional[int] = f.readlines()
lowercase_ : Tuple = 0
# First let's put the right version
while not lines[index].startswith("""const stableVersion =""" ):
index += 1
lowercase_ : Optional[Any] = F'''const stableVersion = "v{version}"\n'''
# Then update the dictionary
while not lines[index].startswith("""const versionMapping = {""" ):
index += 1
# We go until the end
while not lines[index].startswith("""}""" ):
index += 1
# We add the new version at the end
lines[index - 1] += F''' "v{version}": "v{version}",\n'''
with open(UpperCAmelCase__ , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(UpperCAmelCase__ )
if __name__ == "__main__":
_lowercase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("--version", help="Release version.")
_lowercase : Dict = parser.parse_args()
update_custom_js(args.version)
| 30
| 1
|
'''simple docstring'''
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
_lowercase : str = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class __magic_name__ ( _UpperCAmelCase):
def __init__( self : Optional[int] , lowercase_ : Any , lowercase_ : Optional[Any] , lowercase_ : Any=None , lowercase_ : Union[str, Any]=1 ):
lowercase_ : List[str] = tokenizer
lowercase_ : Optional[Any] = dataset
lowercase_ : int = len(lowercase_ ) if n_tasks is None else n_tasks
lowercase_ : Optional[int] = n_copies
def __iter__( self : List[str] ):
lowercase_ : Optional[int] = []
for task in range(self.n_tasks ):
            # without strip, the model generates commented code ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]["""prompt"""].strip() )
lowercase_ : str = self.tokenizer(lowercase_ , padding=lowercase_ , return_tensors="""pt""" )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class __magic_name__ ( _UpperCAmelCase):
def __init__( self : Any , lowercase_ : List[str] , lowercase_ : str , lowercase_ : List[Any] ):
lowercase_ : List[str] = start_length
lowercase_ : Optional[Any] = eof_strings
lowercase_ : str = tokenizer
def __call__( self : Optional[int] , lowercase_ : str , lowercase_ : List[Any] , **lowercase_ : List[str] ):
lowercase_ : Dict = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
lowercase_ : Optional[int] = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(lowercase_ )
def lowerCamelCase ( UpperCAmelCase__ : str ) -> List[str]:
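    # split the generation on the EOF strings and drop everything from the last
    # matched marker onward, keeping only the completed function body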
lowercase_ : Optional[Any] = re.split("""(%s)""" % """|""".join(UpperCAmelCase__ ) , UpperCAmelCase__ )
# last string should be ""
return "".join(string_list[:-2] )
def lowerCamelCase ( UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : str=20 , **UpperCAmelCase__ : List[Any] ) -> Dict:
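    # Generate multiple completions per HumanEval task (batch_size sequences per prompt copy),
    # gather them across processes, then decode and truncate each at its end-of-function marker.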
lowercase_ : str = defaultdict(UpperCAmelCase__ ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(UpperCAmelCase__ ) ):
with torch.no_grad():
lowercase_ : str = batch["""ids"""].shape[-1]
lowercase_ : int = accelerator.unwrap_model(UpperCAmelCase__ ).generate(
input_ids=batch["""ids"""][:, : batch["""input_len"""]] , num_return_sequences=UpperCAmelCase__ , **UpperCAmelCase__ )
# each task is generated batch_size times
lowercase_ : Tuple = batch["""task_id"""].repeat(UpperCAmelCase__ )
lowercase_ : str = accelerator.pad_across_processes(
UpperCAmelCase__ , dim=1 , pad_index=tokenizer.pad_token_id )
lowercase_ , lowercase_ : Optional[int] = accelerator.gather((generated_tokens, generated_tasks) )
lowercase_ : Optional[Any] = generated_tokens.cpu().numpy()
lowercase_ : List[Any] = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
gen_token_dict[task].append(UpperCAmelCase__ )
lowercase_ : str = [[] for _ in range(UpperCAmelCase__ )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
lowercase_ : List[Any] = tokenizer.decode(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ , clean_up_tokenization_spaces=UpperCAmelCase__ )
code_gens[task].append(remove_last_block(UpperCAmelCase__ ) )
return code_gens
def lowerCamelCase ( ) -> List[str]:
# Setup configuration
lowercase_ : str = HfArgumentParser(UpperCAmelCase__ )
lowercase_ : int = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
lowercase_ : Any = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
lowercase_ : Optional[Any] = """false"""
if args.num_workers is None:
lowercase_ : Any = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
lowercase_ : Optional[Any] = Accelerator()
set_seed(args.seed , device_specific=UpperCAmelCase__ )
# Load model and tokenizer
lowercase_ : str = AutoTokenizer.from_pretrained(args.model_ckpt )
lowercase_ : str = tokenizer.eos_token
lowercase_ : Union[str, Any] = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
lowercase_ : Union[str, Any] = {
"""do_sample""": args.do_sample,
"""temperature""": args.temperature,
"""max_new_tokens""": args.max_new_tokens,
"""top_p""": args.top_p,
"""top_k""": args.top_k,
"""stopping_criteria""": StoppingCriteriaList([EndOfFunctionCriteria(0 , UpperCAmelCase__ , UpperCAmelCase__ )] ),
}
# Load evaluation dataset and metric
lowercase_ : Optional[int] = load_dataset("""openai_humaneval""" )
lowercase_ : Optional[int] = load_metric("""code_eval""" )
lowercase_ : Dict = args.num_tasks if args.num_tasks is not None else len(human_eval["""test"""] )
lowercase_ : Union[str, Any] = args.n_samples // args.batch_size
lowercase_ : int = TokenizedDataset(UpperCAmelCase__ , human_eval["""test"""] , n_copies=UpperCAmelCase__ , n_tasks=UpperCAmelCase__ )
    # do not confuse args.batch_size with the dataloader batch size (fixed at 1): it is actually the number of return sequences per prompt
lowercase_ : Tuple = DataLoader(UpperCAmelCase__ , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
lowercase_ : Dict = code_eval_metric.compute(references=[""""""] , predictions=[[""""""]] )
except ValueError as exception:
print(
"""Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`"""
""" flag to enable code evaluation.""" )
raise exception
lowercase_ , lowercase_ : Union[str, Any] = accelerator.prepare(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase_ : Optional[int] = complete_code(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , n_tasks=UpperCAmelCase__ , batch_size=args.batch_size , **UpperCAmelCase__ , )
if accelerator.is_main_process:
lowercase_ : List[Any] = []
for task in tqdm(range(UpperCAmelCase__ ) ):
lowercase_ : str = human_eval["""test"""][task]["""test"""]
lowercase_ : Dict = F'''check({human_eval['test'][task]['entry_point']})'''
references.append("""\n""" + test_func + """\n""" + entry_point )
# Evaluate completions with "code_eval" metric
lowercase_ , lowercase_ : str = code_eval_metric.compute(
references=UpperCAmelCase__ , predictions=UpperCAmelCase__ , num_workers=args.num_workers )
print(F'''Results: {pass_at_k}''' )
# Save results to json file
with open(args.output_file , """w""" ) as fp:
json.dump(UpperCAmelCase__ , UpperCAmelCase__ )
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 30
|
'''simple docstring'''
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class __magic_name__ :
def __init__( self : Tuple , lowercase_ : Tuple , lowercase_ : Any=3 , lowercase_ : int=32 , lowercase_ : str=3 , lowercase_ : int=10 , lowercase_ : Any=[8, 16, 32, 64] , lowercase_ : Tuple=[1, 1, 2, 1] , lowercase_ : Any=True , lowercase_ : int=True , lowercase_ : Any="relu" , lowercase_ : List[Any]=3 , lowercase_ : Tuple=None , lowercase_ : Union[str, Any]=["stage2", "stage3", "stage4"] , lowercase_ : Optional[int]=[2, 3, 4] , lowercase_ : List[str]=1 , ):
lowercase_ : Any = parent
lowercase_ : str = batch_size
lowercase_ : Any = image_size
lowercase_ : Optional[Any] = num_channels
lowercase_ : Any = embeddings_size
lowercase_ : Union[str, Any] = hidden_sizes
lowercase_ : Any = depths
lowercase_ : Dict = is_training
lowercase_ : Tuple = use_labels
lowercase_ : str = hidden_act
lowercase_ : Optional[Any] = num_labels
lowercase_ : Tuple = scope
lowercase_ : Any = len(lowercase_ )
lowercase_ : Optional[Any] = out_features
lowercase_ : Tuple = out_indices
lowercase_ : str = num_groups
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
lowercase_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase_ : List[Any] = None
if self.use_labels:
lowercase_ : List[str] = ids_tensor([self.batch_size] , self.num_labels )
lowercase_ : int = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : int , lowercase_ : List[str] , lowercase_ : List[str] ):
lowercase_ : Optional[int] = BitModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ : List[Any] = model(lowercase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : Tuple , lowercase_ : Optional[int] , lowercase_ : List[Any] ):
lowercase_ : Union[str, Any] = self.num_labels
lowercase_ : Tuple = BitForImageClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ : Any = model(lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : List[Any] , lowercase_ : Optional[int] ):
lowercase_ : Any = BitBackbone(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ : Dict = model(lowercase_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
lowercase_ : List[str] = None
lowercase_ : Dict = BitBackbone(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ : Tuple = model(lowercase_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ : Optional[int] = self.prepare_config_and_inputs()
lowercase_ , lowercase_ , lowercase_ : Optional[Any] = config_and_inputs
lowercase_ : Any = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __magic_name__ ( _UpperCAmelCase, _UpperCAmelCase, unittest.TestCase):
UpperCamelCase__ = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
UpperCamelCase__ = (
{'''feature-extraction''': BitModel, '''image-classification''': BitForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
def SCREAMING_SNAKE_CASE_ ( self : str ):
lowercase_ : int = BitModelTester(self )
lowercase_ : Optional[int] = ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
return
@unittest.skip(reason="""Bit does not output attentions""" )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
pass
@unittest.skip(reason="""Bit does not use inputs_embeds""" )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
pass
@unittest.skip(reason="""Bit does not support input and output embeddings""" )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
pass
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
lowercase_ , lowercase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : Optional[Any] = model_class(lowercase_ )
lowercase_ : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ : Union[str, Any] = [*signature.parameters.keys()]
lowercase_ : Optional[int] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
lowercase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ , lowercase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : List[Any] = model_class(config=lowercase_ )
for name, module in model.named_modules():
if isinstance(lowercase_ , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
def check_hidden_states_output(lowercase_ : Optional[int] , lowercase_ : int , lowercase_ : int ):
lowercase_ : Optional[Any] = model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
with torch.no_grad():
lowercase_ : List[Any] = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
lowercase_ : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowercase_ : Optional[int] = self.model_tester.num_stages
self.assertEqual(len(lowercase_ ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowercase_ , lowercase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ : Dict = ["""preactivation""", """bottleneck"""]
for model_class in self.all_model_classes:
for layer_type in layers_type:
lowercase_ : Union[str, Any] = layer_type
lowercase_ : Optional[Any] = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase_ : Union[str, Any] = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
@unittest.skip(reason="""Bit does not use feedforward chunking""" )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
pass
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
lowercase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase_ )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ : List[str] = BitModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
def lowerCamelCase ( ) -> Optional[Any]:
lowercase_ : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class __magic_name__ ( unittest.TestCase):
@cached_property
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def SCREAMING_SNAKE_CASE_ ( self : int ):
lowercase_ : List[str] = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(lowercase_ )
lowercase_ : int = self.default_image_processor
lowercase_ : List[Any] = prepare_img()
lowercase_ : Dict = image_processor(images=lowercase_ , return_tensors="""pt""" ).to(lowercase_ )
# forward pass
with torch.no_grad():
lowercase_ : str = model(**lowercase_ )
# verify the logits
lowercase_ : Optional[int] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowercase_ )
lowercase_ : Union[str, Any] = torch.tensor([[-0.65_26, -0.52_63, -1.43_98]] ).to(lowercase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase_ , atol=1E-4 ) )
@require_torch
class __magic_name__ ( _UpperCAmelCase, unittest.TestCase):
UpperCamelCase__ = (BitBackbone,) if is_torch_available() else ()
UpperCamelCase__ = BitConfig
UpperCamelCase__ = False
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
lowercase_ : Union[str, Any] = BitModelTester(self )
| 30
| 1
|
'''simple docstring'''
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class __magic_name__ :
pass
| 30
|
'''simple docstring'''
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
_lowercase : Optional[Any] = [
"Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the"
" final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe"
" depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.",
"The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal"
" accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's"
" founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the"
" body.",
"Amnesty International releases its annual report on the death penalty. The report catalogs the use of"
" state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the"
" world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital"
" punishment.",
]
_lowercase : List[Any] = [
"Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ."
" Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz"
" had informed his Lufthansa training school of an episode of severe depression, airline says .",
"Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ."
" Israel and the United States opposed the move, which could open the door to war crimes investigations against"
" Israelis .",
"Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to"
" death . Organization claims that governments around the world are using the threat of terrorism to advance"
" executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death"
" sentences up by 28% .",
]
def lowerCamelCase ( ) -> List[str]:
lowercase_ : str = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , bootstrap_aggregation=UpperCAmelCase__ , rouge_keys=["""rouge2""", """rougeL"""] )
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase_ : int = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , bootstrap_aggregation=UpperCAmelCase__ , rouge_keys=["""rouge2"""] )
assert (
pd.DataFrame(no_aggregation["""rouge2"""] ).fmeasure.mean()
== pd.DataFrame(no_aggregation_just_ra["""rouge2"""] ).fmeasure.mean()
)
def lowerCamelCase ( ) -> Optional[Any]:
lowercase_ : Tuple = """rougeLsum"""
lowercase_ : Optional[Any] = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=[k] )[k]
lowercase_ : Optional[Any] = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=[k] )[k]
assert score > score_no_sep
def lowerCamelCase ( ) -> List[Any]:
lowercase_ : Optional[int] = ["""rouge1""", """rouge2""", """rougeL"""]
lowercase_ : Tuple = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=UpperCAmelCase__ )
lowercase_ : Tuple = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=UpperCAmelCase__ )
assert score_sep == score_no_sep
def lowerCamelCase ( ) -> Optional[Any]:
lowercase_ : Union[str, Any] = [
"""Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.""",
"""Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .""",
]
lowercase_ : List[str] = [
"""Margot Frank, died in 1945, a month earlier than previously thought.""",
"""Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"""
""" the final seconds on board Flight 9525.""",
]
assert calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ ) == calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ )
def lowerCamelCase ( ) -> Union[str, Any]:
lowercase_ : Optional[Any] = [
"""\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" """
]
lowercase_ : List[Any] = [
""" Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."""
]
lowercase_ : Optional[int] = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , rouge_keys=["""rougeLsum"""] , newline_sep=UpperCAmelCase__ )["""rougeLsum"""]
lowercase_ : List[str] = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , rouge_keys=["""rougeLsum"""] )["""rougeLsum"""]
assert new_score > prev_score
def lowerCamelCase ( ) -> Tuple:
lowercase_ : Optional[int] = Path("""examples/seq2seq/test_data/wmt_en_ro""" )
lowercase_ : List[Any] = calculate_rouge_path(data_dir.joinpath("""test.source""" ) , data_dir.joinpath("""test.target""" ) )
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase_ : Union[str, Any] = calculate_rouge_path(
data_dir.joinpath("""test.source""" ) , data_dir.joinpath("""test.target""" ) , bootstrap_aggregation=UpperCAmelCase__ )
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
| 30
| 1
|
'''simple docstring'''
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
_lowercase : List[Any] = logging.get_logger(__name__)
_lowercase : Tuple = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
_lowercase : str = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
_lowercase : List[str] = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
_lowercase : List[Any] = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
_lowercase : Dict = {
"facebook/dpr-ctx_encoder-single-nq-base": 512,
"facebook/dpr-ctx_encoder-multiset-base": 512,
}
_lowercase : Tuple = {
"facebook/dpr-question_encoder-single-nq-base": 512,
"facebook/dpr-question_encoder-multiset-base": 512,
}
_lowercase : int = {
"facebook/dpr-reader-single-nq-base": 512,
"facebook/dpr-reader-multiset-base": 512,
}
_lowercase : Any = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
_lowercase : List[str] = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
_lowercase : Any = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = VOCAB_FILES_NAMES
UpperCamelCase__ = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase__ = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = VOCAB_FILES_NAMES
UpperCamelCase__ = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase__ = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
_lowercase : Dict = collections.namedtuple(
"DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
_lowercase : List[Any] = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
_lowercase : Union[str, Any] = r"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. 
Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(_UpperCAmelCase)
class __magic_name__ :
def __call__( self : List[Any] , lowercase_ : List[Any] , lowercase_ : Optional[str] = None , lowercase_ : Optional[str] = None , lowercase_ : Union[bool, str] = False , lowercase_ : Union[bool, str] = False , lowercase_ : Optional[int] = None , lowercase_ : Optional[Union[str, TensorType]] = None , lowercase_ : Optional[bool] = None , **lowercase_ : int , ):
if titles is None and texts is None:
return super().__call__(
lowercase_ , padding=lowercase_ , truncation=lowercase_ , max_length=lowercase_ , return_tensors=lowercase_ , return_attention_mask=lowercase_ , **lowercase_ , )
elif titles is None or texts is None:
lowercase_ : Tuple = titles if texts is None else texts
return super().__call__(
lowercase_ , lowercase_ , padding=lowercase_ , truncation=lowercase_ , max_length=lowercase_ , return_tensors=lowercase_ , return_attention_mask=lowercase_ , **lowercase_ , )
lowercase_ : Optional[int] = titles if not isinstance(lowercase_ , lowercase_ ) else [titles]
lowercase_ : List[str] = texts if not isinstance(lowercase_ , lowercase_ ) else [texts]
lowercase_ : str = len(lowercase_ )
lowercase_ : int = questions if not isinstance(lowercase_ , lowercase_ ) else [questions] * n_passages
if len(lowercase_ ) != len(lowercase_ ):
raise ValueError(
f'''There should be as many titles as texts but got {len(lowercase_ )} titles and {len(lowercase_ )} texts.''' )
lowercase_ : Tuple = super().__call__(lowercase_ , lowercase_ , padding=lowercase_ , truncation=lowercase_ )["""input_ids"""]
lowercase_ : int = super().__call__(lowercase_ , add_special_tokens=lowercase_ , padding=lowercase_ , truncation=lowercase_ )["""input_ids"""]
lowercase_ : str = {
"""input_ids""": [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(lowercase_ , lowercase_ )
]
}
if return_attention_mask is not False:
lowercase_ : Optional[Any] = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
lowercase_ : Any = attention_mask
return self.pad(lowercase_ , padding=lowercase_ , max_length=lowercase_ , return_tensors=lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : BatchEncoding , lowercase_ : DPRReaderOutput , lowercase_ : int = 16 , lowercase_ : int = 64 , lowercase_ : int = 4 , ):
lowercase_ : str = reader_input["""input_ids"""]
lowercase_ , lowercase_ , lowercase_ : Tuple = reader_output[:3]
lowercase_ : Optional[Any] = len(lowercase_ )
lowercase_ : Optional[Any] = sorted(range(lowercase_ ) , reverse=lowercase_ , key=relevance_logits.__getitem__ )
lowercase_ : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
lowercase_ : Optional[int] = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
lowercase_ : int = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
lowercase_ : Any = sequence_ids.index(self.pad_token_id )
else:
lowercase_ : Optional[int] = len(lowercase_ )
lowercase_ : Any = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=lowercase_ , top_spans=lowercase_ , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=lowercase_ , start_index=lowercase_ , end_index=lowercase_ , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(lowercase_ ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : List[int] , lowercase_ : List[int] , lowercase_ : int , lowercase_ : int , ):
lowercase_ : int = []
for start_index, start_score in enumerate(lowercase_ ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
lowercase_ : Dict = sorted(lowercase_ , key=lambda lowercase_ : x[1] , reverse=lowercase_ )
lowercase_ : Any = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(f'''Wrong span indices: [{start_index}:{end_index}]''' )
lowercase_ : Tuple = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(f'''Span is too long: {length} > {max_answer_length}''' )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(lowercase_ ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(_UpperCAmelCase)
class __magic_name__ ( _UpperCAmelCase, _UpperCAmelCase):
UpperCamelCase__ = VOCAB_FILES_NAMES
UpperCamelCase__ = READER_PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase__ = READER_PRETRAINED_INIT_CONFIGURATION
UpperCamelCase__ = ['''input_ids''', '''attention_mask''']
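# Usage sketch (added for illustration, not part of the original file): the mixin above backs the
# public `DPRReaderTokenizer` in transformers. The checkpoint name is the standard DPR reader
# checkpoint; running this downloads pretrained weights, so treat it as a sketch rather than a test.
if __name__ == "__main__":
    from transformers import DPRReader, DPRReaderTokenizer

    tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
    model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")
    encoded_inputs = tokenizer(
        questions=["What is love?"],
        titles=["Haddaway"],
        texts=["'What Is Love' is a song recorded by the artist Haddaway"],
        return_tensors="pt",
    )
    outputs = model(**encoded_inputs)
    best_spans = tokenizer.decode_best_spans(encoded_inputs, outputs)
    print(best_spans[0].text)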
| 30
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : Optional[Any] = logging.get_logger(__name__)
_lowercase : Union[str, Any] = {
"facebook/s2t-small-librispeech-asr": (
"https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = '''speech_to_text'''
UpperCamelCase__ = ['''past_key_values''']
UpperCamelCase__ = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self : str , lowercase_ : Optional[int]=10000 , lowercase_ : int=12 , lowercase_ : Any=2048 , lowercase_ : Any=4 , lowercase_ : Dict=6 , lowercase_ : Any=2048 , lowercase_ : List[str]=4 , lowercase_ : str=0.0 , lowercase_ : str=0.0 , lowercase_ : Union[str, Any]=True , lowercase_ : List[Any]=True , lowercase_ : int="relu" , lowercase_ : str=256 , lowercase_ : int=0.1 , lowercase_ : int=0.0 , lowercase_ : str=0.0 , lowercase_ : Optional[int]=0.02 , lowercase_ : str=2 , lowercase_ : Union[str, Any]=True , lowercase_ : Any=1 , lowercase_ : Dict=0 , lowercase_ : List[str]=2 , lowercase_ : List[Any]=6000 , lowercase_ : Tuple=1024 , lowercase_ : str=2 , lowercase_ : Any=(5, 5) , lowercase_ : Union[str, Any]=1024 , lowercase_ : Dict=80 , lowercase_ : List[Any]=1 , **lowercase_ : int , ):
lowercase_ : List[Any] = vocab_size
lowercase_ : str = d_model
lowercase_ : List[Any] = encoder_ffn_dim
lowercase_ : str = encoder_layers
lowercase_ : Dict = encoder_attention_heads
lowercase_ : str = decoder_ffn_dim
lowercase_ : int = decoder_layers
lowercase_ : Any = decoder_attention_heads
lowercase_ : Any = dropout
lowercase_ : Dict = attention_dropout
lowercase_ : Optional[int] = activation_dropout
lowercase_ : Any = activation_function
lowercase_ : Union[str, Any] = init_std
lowercase_ : str = encoder_layerdrop
lowercase_ : Optional[int] = decoder_layerdrop
lowercase_ : Dict = use_cache
lowercase_ : Union[str, Any] = encoder_layers
lowercase_ : Tuple = scale_embedding # scale factor will be sqrt(d_model) if True
lowercase_ : Dict = max_source_positions
lowercase_ : Optional[int] = max_target_positions
lowercase_ : Tuple = num_conv_layers
lowercase_ : Tuple = list(lowercase_ )
lowercase_ : Union[str, Any] = conv_channels
lowercase_ : str = input_feat_per_channel
lowercase_ : str = input_channels
if len(self.conv_kernel_sizes ) != self.num_conv_layers:
raise ValueError(
"""Configuration for convolutional module is incorrect. """
"""It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` """
f'''but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, '''
f'''`config.num_conv_layers = {self.num_conv_layers}`.''' )
super().__init__(
pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , is_encoder_decoder=lowercase_ , decoder_start_token_id=lowercase_ , **lowercase_ , )
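# Usage sketch (added for illustration, not part of the original file): this obfuscated class
# corresponds to `transformers.Speech2TextConfig`; the values below are arbitrary and simply show
# that `conv_kernel_sizes` must contain one entry per convolutional layer, or the check above raises.
if __name__ == "__main__":
    from transformers import Speech2TextConfig

    config = Speech2TextConfig(num_conv_layers=2, conv_kernel_sizes=(5, 5), input_feat_per_channel=80)
    print(config.d_model, config.conv_kernel_sizes)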
| 30
| 1
|
'''simple docstring'''
def multiplication_table(number: int, number_of_terms: int) -> str:
    return "\n".join(
        f"{number} * {i} = {number * i}" for i in range(1, number_of_terms + 1)
    )


if __name__ == "__main__":
    print(multiplication_table(number=5, number_of_terms=10))
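    # Quick expected-output check (added for illustration, not part of the original script):
    assert multiplication_table(3, 2) == "3 * 1 = 3\n3 * 2 = 6"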
| 30
|
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class __magic_name__ :
def __init__( self : int , lowercase_ : Optional[Any] , lowercase_ : List[Any]=13 , lowercase_ : List[str]=10 , lowercase_ : Union[str, Any]=3 , lowercase_ : str=2 , lowercase_ : Optional[Any]=2 , lowercase_ : int=True , lowercase_ : List[Any]=True , lowercase_ : Union[str, Any]=32 , lowercase_ : Union[str, Any]=5 , lowercase_ : str=4 , lowercase_ : Dict=37 , lowercase_ : Tuple="gelu" , lowercase_ : int=0.1 , lowercase_ : Optional[Any]=0.1 , lowercase_ : Any=10 , lowercase_ : Tuple=0.02 , lowercase_ : Any="divided_space_time" , lowercase_ : Tuple=None , ):
lowercase_ : int = parent
lowercase_ : str = batch_size
lowercase_ : List[str] = image_size
lowercase_ : str = num_channels
lowercase_ : List[Any] = patch_size
lowercase_ : Optional[Any] = num_frames
lowercase_ : Dict = is_training
lowercase_ : int = use_labels
lowercase_ : List[str] = hidden_size
lowercase_ : Dict = num_hidden_layers
lowercase_ : Dict = num_attention_heads
lowercase_ : Any = intermediate_size
lowercase_ : Optional[int] = hidden_act
lowercase_ : Optional[Any] = hidden_dropout_prob
lowercase_ : List[Any] = attention_probs_dropout_prob
lowercase_ : Any = attention_type
lowercase_ : Union[str, Any] = initializer_range
lowercase_ : List[str] = scope
lowercase_ : Optional[int] = num_labels
# in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
lowercase_ : Dict = (image_size // patch_size) ** 2
lowercase_ : List[Any] = (num_frames) * self.num_patches_per_frame + 1
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
lowercase_ : Optional[Any] = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
lowercase_ : int = None
if self.use_labels:
lowercase_ : Optional[int] = ids_tensor([self.batch_size] , self.num_labels )
lowercase_ : Optional[Any] = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
lowercase_ : int = TimesformerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
lowercase_ : Any = self.num_labels
return config
def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : List[Any] , lowercase_ : Dict , lowercase_ : List[str] ):
lowercase_ : Optional[Any] = TimesformerModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ : int = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Optional[int] , lowercase_ : List[str] , lowercase_ : str ):
lowercase_ : Dict = TimesformerForVideoClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ : int = model(lowercase_ )
# verify the logits shape
lowercase_ : List[Any] = torch.Size((self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
lowercase_ : List[str] = self.prepare_config_and_inputs()
lowercase_ , lowercase_ , lowercase_ : int = config_and_inputs
lowercase_ : List[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __magic_name__ ( _UpperCAmelCase, _UpperCAmelCase, unittest.TestCase):
UpperCamelCase__ = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
UpperCamelCase__ = (
{'''feature-extraction''': TimesformerModel, '''video-classification''': TimesformerForVideoClassification}
if is_torch_available()
else {}
)
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
lowercase_ : Any = TimesformerModelTester(self )
lowercase_ : Union[str, Any] = ConfigTester(
self , config_class=lowercase_ , has_text_modality=lowercase_ , hidden_size=37 )
def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : Any , lowercase_ : List[str] , lowercase_ : Tuple=False ):
lowercase_ : List[Any] = copy.deepcopy(lowercase_ )
if return_labels:
if model_class in get_values(lowercase_ ):
lowercase_ : List[str] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase_ )
return inputs_dict
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""TimeSformer does not use inputs_embeds""" )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
pass
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
lowercase_ , lowercase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : str = model_class(lowercase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase_ : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase_ , nn.Linear ) )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ , lowercase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : Dict = model_class(lowercase_ )
lowercase_ : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ : Union[str, Any] = [*signature.parameters.keys()]
lowercase_ : str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : str ):
lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : str ):
lowercase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*lowercase_ )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Any ):
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ : Any = TimesformerModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : str ):
if not self.has_attentions:
pass
else:
lowercase_ , lowercase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ : List[str] = True
for model_class in self.all_model_classes:
lowercase_ : str = self.model_tester.seq_length
lowercase_ : int = self.model_tester.num_frames
lowercase_ : int = True
lowercase_ : Any = False
lowercase_ : str = True
lowercase_ : int = model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
with torch.no_grad():
lowercase_ : List[Any] = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
lowercase_ : List[str] = outputs.attentions
self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowercase_ : List[str] = True
lowercase_ : str = model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
with torch.no_grad():
lowercase_ : Dict = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
lowercase_ : int = outputs.attentions
self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
lowercase_ : Optional[Any] = len(lowercase_ )
# Check attention is always last and order is fine
lowercase_ : Tuple = True
lowercase_ : Dict = True
lowercase_ : str = model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
with torch.no_grad():
lowercase_ : str = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
self.assertEqual(out_len + 1 , len(lowercase_ ) )
lowercase_ : Optional[Any] = outputs.attentions
self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
def check_hidden_states_output(lowercase_ : Union[str, Any] , lowercase_ : Optional[int] , lowercase_ : Dict ):
lowercase_ : List[str] = model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
with torch.no_grad():
lowercase_ : Optional[Any] = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
lowercase_ : Dict = outputs.hidden_states
lowercase_ : List[Any] = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(lowercase_ ) , lowercase_ )
lowercase_ : List[Any] = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
lowercase_ , lowercase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : List[str] = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase_ : Optional[int] = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
def lowerCamelCase ( ) -> Optional[int]:
lowercase_ : List[str] = hf_hub_download(
repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" )
lowercase_ : List[Any] = np.load(UpperCAmelCase__ )
return list(UpperCAmelCase__ )
@require_torch
@require_vision
class __magic_name__ ( unittest.TestCase):
@cached_property
def SCREAMING_SNAKE_CASE_ ( self : str ):
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
lowercase_ : Any = TimesformerForVideoClassification.from_pretrained("""facebook/timesformer-base-finetuned-k400""" ).to(
lowercase_ )
lowercase_ : Optional[Any] = self.default_image_processor
lowercase_ : Any = prepare_video()
lowercase_ : Optional[int] = image_processor(video[:8] , return_tensors="""pt""" ).to(lowercase_ )
# forward pass
with torch.no_grad():
lowercase_ : Optional[Any] = model(**lowercase_ )
# verify the logits
lowercase_ : Any = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape , lowercase_ )
lowercase_ : int = torch.tensor([-0.30_16, -0.77_13, -0.42_05] ).to(lowercase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase_ , atol=1E-4 ) )
| 30
| 1
|
'''simple docstring'''
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
torch_version = parse(importlib.metadata.version("torch"))


def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str) -> bool:
    # Compare an installed library (or an already-parsed Version) against a requirement string.
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f"`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}")
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        library_or_version = parse(importlib.metadata.version(library_or_version))
    return operation(library_or_version, parse(requirement_version))


def is_torch_version(operation: str, version: str) -> bool:
    # Convenience wrapper that checks the installed torch version.
    return compare_versions(torch_version, operation, version)
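# Usage sketch (added for illustration, not part of the original module): `STR_OPERATION_TO_FUNC`
# maps operator strings such as ">=" to functions from the `operator` module, so these helpers
# answer version checks at runtime. Note the relative import above means this file is intended to
# be imported as part of its package rather than run directly.
if __name__ == "__main__":
    print(is_torch_version(">=", "1.10.0"))  # does the installed torch satisfy the bound?
    print(compare_versions("packaging", ">=", "20.0"))  # same check for any installed package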
| 30
|
'''simple docstring'''
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
_lowercase : Tuple = logging.get_logger(__name__)
# General docstring
_lowercase : List[str] = "RegNetConfig"
# Base docstring
_lowercase : Dict = "facebook/regnet-y-040"
_lowercase : Union[str, Any] = [1, 1088, 7, 7]
# Image classification docstring
_lowercase : Optional[Any] = "facebook/regnet-y-040"
_lowercase : Union[str, Any] = "tabby, tabby cat"
_lowercase : str = [
"facebook/regnet-y-040",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class __magic_name__ ( nn.Module):
def __init__( self : Union[str, Any] , lowercase_ : int , lowercase_ : int , lowercase_ : int = 3 , lowercase_ : int = 1 , lowercase_ : int = 1 , lowercase_ : Optional[str] = "relu" , ):
super().__init__()
lowercase_ : List[Any] = nn.Convad(
lowercase_ , lowercase_ , kernel_size=lowercase_ , stride=lowercase_ , padding=kernel_size // 2 , groups=lowercase_ , bias=lowercase_ , )
lowercase_ : str = nn.BatchNormad(lowercase_ )
lowercase_ : Optional[int] = ACTaFN[activation] if activation is not None else nn.Identity()
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : List[str] ):
lowercase_ : Dict = self.convolution(lowercase_ )
lowercase_ : str = self.normalization(lowercase_ )
lowercase_ : Optional[Any] = self.activation(lowercase_ )
return hidden_state
class __magic_name__ ( nn.Module):
def __init__( self : List[Any] , lowercase_ : RegNetConfig ):
super().__init__()
lowercase_ : str = RegNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act )
lowercase_ : Any = config.num_channels
def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : Optional[Any] ):
lowercase_ : List[str] = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
"""Make sure that the channel dimension of the pixel values matches the one set in the configuration.""" )
lowercase_ : Any = self.embedder(lowercase_ )
return hidden_state
class __magic_name__ ( nn.Module):
def __init__( self : Optional[int] , lowercase_ : int , lowercase_ : int , lowercase_ : int = 2 ):
super().__init__()
lowercase_ : Optional[Any] = nn.Convad(lowercase_ , lowercase_ , kernel_size=1 , stride=lowercase_ , bias=lowercase_ )
lowercase_ : Union[str, Any] = nn.BatchNormad(lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : Tensor ):
lowercase_ : Tuple = self.convolution(lowercase_ )
lowercase_ : str = self.normalization(lowercase_ )
return hidden_state
class __magic_name__ ( nn.Module):
def __init__( self : str , lowercase_ : int , lowercase_ : int ):
super().__init__()
lowercase_ : int = nn.AdaptiveAvgPoolad((1, 1) )
lowercase_ : int = nn.Sequential(
nn.Convad(lowercase_ , lowercase_ , kernel_size=1 ) , nn.ReLU() , nn.Convad(lowercase_ , lowercase_ , kernel_size=1 ) , nn.Sigmoid() , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : Any ):
# b c h w -> b c 1 1
lowercase_ : List[str] = self.pooler(lowercase_ )
lowercase_ : Optional[int] = self.attention(lowercase_ )
lowercase_ : Any = hidden_state * attention
return hidden_state
class __magic_name__ ( nn.Module):
def __init__( self : Optional[int] , lowercase_ : RegNetConfig , lowercase_ : int , lowercase_ : int , lowercase_ : int = 1 ):
super().__init__()
lowercase_ : List[Any] = in_channels != out_channels or stride != 1
lowercase_ : Optional[int] = max(1 , out_channels // config.groups_width )
lowercase_ : Dict = (
RegNetShortCut(lowercase_ , lowercase_ , stride=lowercase_ ) if should_apply_shortcut else nn.Identity()
)
lowercase_ : List[Any] = nn.Sequential(
RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(lowercase_ , lowercase_ , stride=lowercase_ , groups=lowercase_ , activation=config.hidden_act ) , RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=lowercase_ ) , )
lowercase_ : int = ACTaFN[config.hidden_act]
def SCREAMING_SNAKE_CASE_ ( self : Any , lowercase_ : Any ):
lowercase_ : Any = hidden_state
lowercase_ : Union[str, Any] = self.layer(lowercase_ )
lowercase_ : Union[str, Any] = self.shortcut(lowercase_ )
hidden_state += residual
lowercase_ : str = self.activation(lowercase_ )
return hidden_state
class __magic_name__ ( nn.Module):
def __init__( self : Optional[Any] , lowercase_ : RegNetConfig , lowercase_ : int , lowercase_ : int , lowercase_ : int = 1 ):
super().__init__()
lowercase_ : str = in_channels != out_channels or stride != 1
lowercase_ : int = max(1 , out_channels // config.groups_width )
lowercase_ : int = (
RegNetShortCut(lowercase_ , lowercase_ , stride=lowercase_ ) if should_apply_shortcut else nn.Identity()
)
lowercase_ : Union[str, Any] = nn.Sequential(
RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(lowercase_ , lowercase_ , stride=lowercase_ , groups=lowercase_ , activation=config.hidden_act ) , RegNetSELayer(lowercase_ , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=lowercase_ ) , )
lowercase_ : Optional[int] = ACTaFN[config.hidden_act]
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Union[str, Any] ):
lowercase_ : Optional[int] = hidden_state
lowercase_ : str = self.layer(lowercase_ )
lowercase_ : int = self.shortcut(lowercase_ )
hidden_state += residual
lowercase_ : Optional[Any] = self.activation(lowercase_ )
return hidden_state
class __magic_name__ ( nn.Module):
def __init__( self : str , lowercase_ : RegNetConfig , lowercase_ : int , lowercase_ : int , lowercase_ : int = 2 , lowercase_ : int = 2 , ):
super().__init__()
lowercase_ : str = RegNetXLayer if config.layer_type == """x""" else RegNetYLayer
lowercase_ : str = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(
lowercase_ , lowercase_ , lowercase_ , stride=lowercase_ , ) , *[layer(lowercase_ , lowercase_ , lowercase_ ) for _ in range(depth - 1 )] , )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : List[str] ):
lowercase_ : Tuple = self.layers(lowercase_ )
return hidden_state
class __magic_name__ ( nn.Module):
def __init__( self : Dict , lowercase_ : RegNetConfig ):
super().__init__()
lowercase_ : Optional[Any] = nn.ModuleList([] )
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
RegNetStage(
lowercase_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
lowercase_ : Optional[Any] = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(lowercase_ , config.depths[1:] ):
self.stages.append(RegNetStage(lowercase_ , lowercase_ , lowercase_ , depth=lowercase_ ) )
def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : Tensor , lowercase_ : bool = False , lowercase_ : bool = True ):
lowercase_ : Tuple = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
lowercase_ : Union[str, Any] = hidden_states + (hidden_state,)
lowercase_ : Dict = stage_module(lowercase_ )
if output_hidden_states:
lowercase_ : Optional[Any] = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=lowercase_ , hidden_states=lowercase_ )
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = RegNetConfig
UpperCamelCase__ = '''regnet'''
UpperCamelCase__ = '''pixel_values'''
UpperCamelCase__ = True
def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : Optional[Any] ):
if isinstance(lowercase_ , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode="""fan_out""" , nonlinearity="""relu""" )
elif isinstance(lowercase_ , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : Optional[int] , lowercase_ : Any=False ):
if isinstance(lowercase_ , lowercase_ ):
lowercase_ : List[str] = value
_lowercase : Dict = r"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
_lowercase : Any = r"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
'''The bare RegNet model outputting raw features without any specific head on top.''', _UpperCAmelCase, )
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class __magic_name__ ( _UpperCAmelCase):
def __init__( self : Any , lowercase_ : Any ):
super().__init__(lowercase_ )
lowercase_ : List[str] = config
lowercase_ : Union[str, Any] = RegNetEmbeddings(lowercase_ )
lowercase_ : Union[str, Any] = RegNetEncoder(lowercase_ )
lowercase_ : str = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowercase_ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowercase_ , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : Tensor , lowercase_ : Optional[bool] = None , lowercase_ : Optional[bool] = None ):
lowercase_ : List[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase_ : Tuple = return_dict if return_dict is not None else self.config.use_return_dict
lowercase_ : str = self.embedder(lowercase_ )
lowercase_ : Optional[Any] = self.encoder(
lowercase_ , output_hidden_states=lowercase_ , return_dict=lowercase_ )
lowercase_ : List[Any] = encoder_outputs[0]
lowercase_ : str = self.pooler(lowercase_ )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowercase_ , pooler_output=lowercase_ , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
'''
RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
''', _UpperCAmelCase, )
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class __magic_name__ ( _UpperCAmelCase):
def __init__( self : Dict , lowercase_ : str ):
super().__init__(lowercase_ )
lowercase_ : Any = config.num_labels
lowercase_ : List[str] = RegNetModel(lowercase_ )
# classification head
lowercase_ : Any = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowercase_ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowercase_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Optional[torch.FloatTensor] = None , lowercase_ : Optional[torch.LongTensor] = None , lowercase_ : Optional[bool] = None , lowercase_ : Optional[bool] = None , ):
lowercase_ : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict
lowercase_ : Optional[int] = self.regnet(lowercase_ , output_hidden_states=lowercase_ , return_dict=lowercase_ )
lowercase_ : Optional[int] = outputs.pooler_output if return_dict else outputs[1]
lowercase_ : List[Any] = self.classifier(lowercase_ )
lowercase_ : Optional[int] = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
lowercase_ : Optional[int] = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
lowercase_ : str = """single_label_classification"""
else:
lowercase_ : str = """multi_label_classification"""
if self.config.problem_type == "regression":
lowercase_ : str = MSELoss()
if self.num_labels == 1:
lowercase_ : List[Any] = loss_fct(logits.squeeze() , labels.squeeze() )
else:
lowercase_ : List[str] = loss_fct(lowercase_ , lowercase_ )
elif self.config.problem_type == "single_label_classification":
lowercase_ : Optional[int] = CrossEntropyLoss()
lowercase_ : Union[str, Any] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
lowercase_ : Dict = BCEWithLogitsLoss()
lowercase_ : Tuple = loss_fct(lowercase_ , lowercase_ )
if not return_dict:
lowercase_ : Tuple = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=lowercase_ , logits=lowercase_ , hidden_states=outputs.hidden_states )
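# Minimal forward-pass sketch (added for illustration, not part of the original file). It uses the
# public transformers classes these obfuscated definitions correspond to; the checkpoint is the one
# named in the docstrings above, and a random tensor stands in for a real, preprocessed image.
if __name__ == "__main__":
    import torch
    from transformers import RegNetForImageClassification

    model = RegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
    pixel_values = torch.randn(1, 3, 224, 224)
    with torch.no_grad():
        logits = model(pixel_values).logits
    print(logits.shape)  # (1, num_labels); 1000 classes for this ImageNet-1k checkpoint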
| 30
| 1
|
'''simple docstring'''
def climb_stairs(number_of_steps: int) -> int:
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f"number_of_steps needs to be a positive integer, your input {number_of_steps}"
    if number_of_steps == 1:
        return 1
    # Distinct ways to reach step n follow the Fibonacci recurrence:
    # ways(n) = ways(n - 1) + ways(n - 2)
    previous, current = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current


if __name__ == "__main__":
    import doctest

    doctest.testmod()
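    # Worked example (added for illustration, not part of the original module): ways to climb
    # 1, 2, 3, 4, 5 steps taking one or two at a time are 1, 2, 3, 5, 8 respectively.
    for steps in range(1, 6):
        print(steps, climb_stairs(steps))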
| 30
|
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowercase : List[Any] = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : int = [
"FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FocalNetForImageClassification",
"FocalNetForMaskedImageModeling",
"FocalNetBackbone",
"FocalNetModel",
"FocalNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
_lowercase : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 30
| 1
|
'''simple docstring'''
import argparse
import torch
from transformers import GPTaLMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
_lowercase : int = argparse.ArgumentParser(
description=(
"Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"
" Distillation"
)
)
parser.add_argument("--model_type", default="roberta", choices=["roberta", "gpt2"])
parser.add_argument("--model_name", default="roberta-large", type=str)
parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_roberta_048131723.pth", type=str)
parser.add_argument("--vocab_transform", action="store_true")
_lowercase : str = parser.parse_args()
if args.model_type == "roberta":
_lowercase : Optional[Any] = RobertaForMaskedLM.from_pretrained(args.model_name)
_lowercase : int = "roberta"
elif args.model_type == "gpt2":
_lowercase : List[str] = GPTaLMHeadModel.from_pretrained(args.model_name)
_lowercase : Optional[int] = "transformer"
_lowercase : Tuple = model.state_dict()
_lowercase : Any = {}
# Embeddings #
if args.model_type == "gpt2":
for param_name in ["wte.weight", "wpe.weight"]:
_lowercase : Optional[int] = state_dict[f"""{prefix}.{param_name}"""]
else:
for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
_lowercase : List[Any] = f"""{prefix}.embeddings.{w}.weight"""
_lowercase : List[str] = state_dict[param_name]
for w in ["weight", "bias"]:
_lowercase : Optional[Any] = f"""{prefix}.embeddings.LayerNorm.{w}"""
_lowercase : Union[str, Any] = state_dict[param_name]
# Transformer Blocks #
_lowercase : List[str] = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
if args.model_type == "gpt2":
for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
for w in ["weight", "bias"]:
_lowercase : str = state_dict[
f"""{prefix}.h.{teacher_idx}.{layer}.{w}"""
]
_lowercase : List[str] = state_dict[f"""{prefix}.h.{teacher_idx}.attn.bias"""]
else:
for layer in [
"attention.self.query",
"attention.self.key",
"attention.self.value",
"attention.output.dense",
"attention.output.LayerNorm",
"intermediate.dense",
"output.dense",
"output.LayerNorm",
]:
for w in ["weight", "bias"]:
_lowercase : Optional[int] = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"""
]
std_idx += 1
# Language Modeling Head #
if args.model_type == "roberta":
for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
_lowercase : Any = state_dict[f"""{layer}"""]
if args.vocab_transform:
for w in ["weight", "bias"]:
_lowercase : Dict = state_dict[f"""lm_head.dense.{w}"""]
_lowercase : str = state_dict[f"""lm_head.layer_norm.{w}"""]
elif args.model_type == "gpt2":
for w in ["weight", "bias"]:
_lowercase : str = state_dict[f"""{prefix}.ln_f.{w}"""]
_lowercase : List[Any] = state_dict["lm_head.weight"]
print(f"""N layers selected for distillation: {std_idx}""")
print(f"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(f"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
| 30
|
'''simple docstring'''
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize("""dataset_size""" , [None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize("""input_in_memory_max_size""" , ["""default""", 0, 100 * 2**20, 900 * 2**20] )
def lowerCamelCase ( UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str ) -> Any:
if input_in_memory_max_size != "default":
monkeypatch.setattr(datasets.config , """IN_MEMORY_MAX_SIZE""" , UpperCAmelCase__ )
lowercase_ : List[Any] = datasets.config.IN_MEMORY_MAX_SIZE
if input_in_memory_max_size == "default":
assert in_memory_max_size == 0
else:
assert in_memory_max_size == input_in_memory_max_size
if dataset_size and in_memory_max_size:
lowercase_ : str = dataset_size < in_memory_max_size
else:
lowercase_ : List[Any] = False
lowercase_ : Any = is_small_dataset(UpperCAmelCase__ )
assert result == expected
| 30
| 1
|
'''simple docstring'''
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
_lowercase : List[Any] = get_tests_dir("fixtures/dummy-config.json")
class __magic_name__ ( unittest.TestCase):
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ : Optional[int] = 0
def SCREAMING_SNAKE_CASE_ ( self : Any ):
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec("""transformers.models.auto""" ) )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
lowercase_ : Any = AutoConfig.from_pretrained("""bert-base-uncased""" )
self.assertIsInstance(lowercase_ , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
lowercase_ : List[Any] = AutoConfig.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : str ):
lowercase_ : List[Any] = AutoConfig.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
lowercase_ : str = AutoConfig.for_model("""roberta""" )
self.assertIsInstance(lowercase_ , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
lowercase_ : Union[str, Any] = os.path.join(lowercase_ , """fake-roberta""" )
os.makedirs(lowercase_ , exist_ok=lowercase_ )
with open(os.path.join(lowercase_ , """config.json""" ) , """w""" ) as f:
f.write(json.dumps({} ) )
lowercase_ : Any = AutoConfig.from_pretrained(lowercase_ )
self.assertEqual(type(lowercase_ ) , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
try:
AutoConfig.register("""custom""" , lowercase_ )
# Wrong model type will raise an error
with self.assertRaises(lowercase_ ):
AutoConfig.register("""model""" , lowercase_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase_ ):
AutoConfig.register("""bert""" , lowercase_ )
# Now that the config is registered, it can be used as any other config with the auto-API
lowercase_ : List[Any] = CustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowercase_ )
lowercase_ : int = AutoConfig.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def SCREAMING_SNAKE_CASE_ ( self : Any ):
with self.assertRaisesRegex(
lowercase_ , """bert-base is not a local folder and is not a valid model identifier""" ):
lowercase_ : Optional[int] = AutoConfig.from_pretrained("""bert-base""" )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
with self.assertRaisesRegex(
lowercase_ , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
lowercase_ : Optional[Any] = AutoConfig.from_pretrained(lowercase_ , revision="""aaaaaa""" )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
with self.assertRaisesRegex(
lowercase_ , """hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.""" , ):
lowercase_ : Tuple = AutoConfig.from_pretrained("""hf-internal-testing/no-config-test-repo""" )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(lowercase_ ):
lowercase_ : str = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowercase_ ):
lowercase_ : Union[str, Any] = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=lowercase_ )
lowercase_ : Dict = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=lowercase_ )
self.assertEqual(config.__class__.__name__ , """NewModelConfig""" )
# Test config can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowercase_ )
lowercase_ : Tuple = AutoConfig.from_pretrained(lowercase_ , trust_remote_code=lowercase_ )
self.assertEqual(reloaded_config.__class__.__name__ , """NewModelConfig""" )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = '''new-model'''
try:
AutoConfig.register("""new-model""" , lowercase_ )
# If remote code is not set, the default is to use local
lowercase_ : Dict = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" )
self.assertEqual(config.__class__.__name__ , """NewModelConfigLocal""" )
# If remote code is disabled, we load the local one.
lowercase_ : Dict = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=lowercase_ )
self.assertEqual(config.__class__.__name__ , """NewModelConfigLocal""" )
# If remote is enabled, we load from the Hub
lowercase_ : Optional[Any] = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=lowercase_ )
self.assertEqual(config.__class__.__name__ , """NewModelConfig""" )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
| 30
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class __magic_name__ ( unittest.TestCase):
@slow
def SCREAMING_SNAKE_CASE_ ( self : str ):
lowercase_ : Any = TFAutoModelForSeqaSeqLM.from_pretrained("""google/mt5-small""" )
lowercase_ : Dict = AutoTokenizer.from_pretrained("""google/mt5-small""" )
lowercase_ : Union[str, Any] = tokenizer("""Hello there""" , return_tensors="""tf""" ).input_ids
lowercase_ : List[str] = tokenizer("""Hi I am""" , return_tensors="""tf""" ).input_ids
lowercase_ : Optional[Any] = model(lowercase_ , labels=lowercase_ ).loss
lowercase_ : Optional[int] = -tf.math.reduce_mean(lowercase_ ).numpy()
lowercase_ : Optional[int] = -21.22_81_68
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2E-4 )
| 30
| 1
|
'''simple docstring'''
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    # A color is valid for a vertex if no already-colored neighbour uses the same color.
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours)
    )


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    # Base Case: every vertex has been assigned a color
    if index == len(graph):
        return True
    # Recursive Step: try each color for the current vertex
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
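# Small worked example (added for illustration, not part of the original module): a 5-vertex
# graph given as an adjacency matrix; with 3 colors the backtracking search finds an assignment.
if __name__ == "__main__":
    graph = [
        [0, 1, 0, 0, 0],
        [1, 0, 1, 0, 1],
        [0, 1, 0, 1, 0],
        [0, 0, 1, 0, 1],
        [0, 1, 0, 1, 0],
    ]
    print(color(graph, 3))  # e.g. [0, 1, 0, 1, 0] - adjacent vertices never share a color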
| 30
|
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def euler_modified(ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float) -> np.ndarray:
    # Number of integration steps between x0 and x_end
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        # Predictor: plain Euler step
        y_predict = y[k] + step_size * ode_func(x, y[k])
        # Corrector: average the slopes at both ends of the interval (Heun's method)
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        )
        x += step_size
    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
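    # Worked example (added for illustration, not part of the original module): integrate
    # dy/dx = y with y(0) = 1 up to x = 1; the final value approximates e ≈ 2.71828.
    approximation = euler_modified(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
    print(approximation[-1])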
| 30
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowercase : List[Any] = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : int = [
"FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FocalNetForImageClassification",
"FocalNetForMaskedImageModeling",
"FocalNetBackbone",
"FocalNetModel",
"FocalNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
_lowercase : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 30
|
'''simple docstring'''
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float = 1 / sqrt(2 ) ) -> IIRFilter:
lowercase_ : str = tau * frequency / samplerate
lowercase_ : Tuple = sin(UpperCAmelCase__ )
lowercase_ : int = cos(UpperCAmelCase__ )
lowercase_ : Any = _sin / (2 * q_factor)
lowercase_ : Dict = (1 - _cos) / 2
lowercase_ : Optional[int] = 1 - _cos
lowercase_ : Dict = 1 + alpha
lowercase_ : List[Any] = -2 * _cos
lowercase_ : Union[str, Any] = 1 - alpha
lowercase_ : List[Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float = 1 / sqrt(2 ) ) -> IIRFilter:
lowercase_ : str = tau * frequency / samplerate
lowercase_ : Optional[int] = sin(UpperCAmelCase__ )
lowercase_ : Dict = cos(UpperCAmelCase__ )
lowercase_ : Optional[int] = _sin / (2 * q_factor)
lowercase_ : Dict = (1 + _cos) / 2
lowercase_ : str = -1 - _cos
lowercase_ : Dict = 1 + alpha
lowercase_ : Optional[Any] = -2 * _cos
lowercase_ : List[Any] = 1 - alpha
lowercase_ : Union[str, Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float = 1 / sqrt(2 ) ) -> IIRFilter:
lowercase_ : int = tau * frequency / samplerate
lowercase_ : int = sin(UpperCAmelCase__ )
lowercase_ : Union[str, Any] = cos(UpperCAmelCase__ )
lowercase_ : str = _sin / (2 * q_factor)
lowercase_ : str = _sin / 2
lowercase_ : Any = 0
lowercase_ : Optional[Any] = -ba
lowercase_ : Dict = 1 + alpha
lowercase_ : Union[str, Any] = -2 * _cos
lowercase_ : Union[str, Any] = 1 - alpha
lowercase_ : Tuple = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float = 1 / sqrt(2 ) ) -> IIRFilter:
lowercase_ : List[str] = tau * frequency / samplerate
lowercase_ : Any = sin(UpperCAmelCase__ )
lowercase_ : List[Any] = cos(UpperCAmelCase__ )
lowercase_ : Optional[Any] = _sin / (2 * q_factor)
lowercase_ : Any = 1 - alpha
lowercase_ : Optional[Any] = -2 * _cos
lowercase_ : Optional[int] = 1 + alpha
lowercase_ : Dict = IIRFilter(2 )
filt.set_coefficients([ba, ba, ba] , [ba, ba, ba] )
return filt
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float , UpperCAmelCase__ : float = 1 / sqrt(2 ) , ) -> IIRFilter:
lowercase_ : Dict = tau * frequency / samplerate
lowercase_ : Tuple = sin(UpperCAmelCase__ )
lowercase_ : List[Any] = cos(UpperCAmelCase__ )
lowercase_ : List[Any] = _sin / (2 * q_factor)
lowercase_ : Any = 10 ** (gain_db / 40)
lowercase_ : List[str] = 1 + alpha * big_a
lowercase_ : List[Any] = -2 * _cos
lowercase_ : Dict = 1 - alpha * big_a
lowercase_ : str = 1 + alpha / big_a
lowercase_ : List[str] = -2 * _cos
lowercase_ : Tuple = 1 - alpha / big_a
lowercase_ : Any = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float , UpperCAmelCase__ : float = 1 / sqrt(2 ) , ) -> IIRFilter:
lowercase_ : Dict = tau * frequency / samplerate
lowercase_ : Union[str, Any] = sin(UpperCAmelCase__ )
lowercase_ : Any = cos(UpperCAmelCase__ )
lowercase_ : Any = _sin / (2 * q_factor)
lowercase_ : Any = 10 ** (gain_db / 40)
lowercase_ : Any = (big_a + 1) - (big_a - 1) * _cos
lowercase_ : int = (big_a + 1) + (big_a - 1) * _cos
lowercase_ : Tuple = (big_a - 1) - (big_a + 1) * _cos
lowercase_ : Optional[Any] = (big_a - 1) + (big_a + 1) * _cos
lowercase_ : int = 2 * sqrt(UpperCAmelCase__ ) * alpha
lowercase_ : Tuple = big_a * (pmc + aaa)
lowercase_ : List[str] = 2 * big_a * mpc
lowercase_ : Union[str, Any] = big_a * (pmc - aaa)
lowercase_ : Optional[int] = ppmc + aaa
lowercase_ : Optional[int] = -2 * pmpc
lowercase_ : Any = ppmc - aaa
lowercase_ : Optional[int] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float , UpperCAmelCase__ : float = 1 / sqrt(2 ) , ) -> IIRFilter:
lowercase_ : str = tau * frequency / samplerate
lowercase_ : int = sin(UpperCAmelCase__ )
lowercase_ : int = cos(UpperCAmelCase__ )
lowercase_ : Dict = _sin / (2 * q_factor)
lowercase_ : Union[str, Any] = 10 ** (gain_db / 40)
lowercase_ : Union[str, Any] = (big_a + 1) - (big_a - 1) * _cos
lowercase_ : Optional[int] = (big_a + 1) + (big_a - 1) * _cos
lowercase_ : Any = (big_a - 1) - (big_a + 1) * _cos
lowercase_ : str = (big_a - 1) + (big_a + 1) * _cos
lowercase_ : Optional[int] = 2 * sqrt(UpperCAmelCase__ ) * alpha
lowercase_ : Tuple = big_a * (ppmc + aaa)
lowercase_ : List[Any] = -2 * big_a * pmpc
lowercase_ : Optional[Any] = big_a * (ppmc - aaa)
lowercase_ : Optional[Any] = pmc + aaa
lowercase_ : int = 2 * mpc
lowercase_ : Tuple = pmc - aaa
lowercase_ : Union[str, Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
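# Self-contained sketch (added for illustration, not part of the original module): the functions
# above compute standard biquad coefficients (Audio EQ Cookbook style) and hand them to `IIRFilter`.
# Because the variable names in this dump are obfuscated, the snippet below re-derives the low-pass
# coefficients directly and applies them with the direct-form-I difference equation; the cutoff and
# sample-rate values are arbitrary.
from math import cos, sin, sqrt, tau


def lowpass_coefficients(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)):
    w0 = tau * frequency / samplerate
    alpha = sin(w0) / (2 * q_factor)
    b0 = (1 - cos(w0)) / 2
    b1 = 1 - cos(w0)
    b2 = b0
    a0 = 1 + alpha
    a1 = -2 * cos(w0)
    a2 = 1 - alpha
    return (a0, a1, a2), (b0, b1, b2)


def apply_biquad(samples, a_coeffs, b_coeffs):
    a0, a1, a2 = a_coeffs
    b0, b1, b2 = b_coeffs
    x1 = x2 = y1 = y2 = 0.0
    out = []
    for x0 in samples:
        # Direct-form-I biquad: y[n] = (b0*x[n] + b1*x[n-1] + b2*x[n-2] - a1*y[n-1] - a2*y[n-2]) / a0
        y0 = (b0 * x0 + b1 * x1 + b2 * x2 - a1 * y1 - a2 * y2) / a0
        x2, x1 = x1, x0
        y2, y1 = y1, y0
        out.append(y0)
    return out


if __name__ == "__main__":
    a, b = lowpass_coefficients(1000, 48000)
    print(apply_biquad([1.0, 0.0, 0.0, 0.0], a, b))  # impulse response of the low-pass filter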
| 30
| 1
|
'''simple docstring'''
from __future__ import annotations
import math
_lowercase : Union[str, Any] = "2020.9.26"
_lowercase : Any = "xcodz-dot, cclaus, dhruvmanila"
def convert_to_2d(x: float, y: float, z: float, scale: float, distance: float) -> tuple[float, float]:
    if not all(isinstance(val, (float, int)) for val in locals().values()):
        msg = f"Input values must either be float or int: {list(locals().values())}"
        raise TypeError(msg)
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y


def rotate(x: float, y: float, z: float, axis: str, angle: float) -> tuple[float, float, float]:
    if not isinstance(axis, str):
        raise TypeError("Axis must be a str")
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val, (float, int)) for val in input_variables.values()):
        msg = (
            "Input values except axis must either be float or int: "
            f"{list(input_variables.values())}"
        )
        raise TypeError(msg)
    angle = (angle % 360) / 450 * 180 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle) - y * math.sin(angle)
        new_y = y * math.cos(angle) + x * math.sin(angle)
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + y * math.sin(angle)
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + x * math.sin(angle)
        new_y = y
    else:
        raise ValueError("not a valid axis, choose one of 'x', 'y', 'z'")
    return new_x, new_y, new_z


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{convert_to_2d(1.0, 2.0, 3.0, 10.0, 10.0) = }")
    print(f"{rotate(1.0, 2.0, 3.0, 'y', 90.0) = }")
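# Worked example (illustrative): projecting the point (1.0, 2.0, 3.0) with
# scale = 10 and distance = 10 gives projected_x = (1 * 10) / (3 + 10) * 10 ≈ 7.69
# and projected_y = (2 * 10) / (3 + 10) * 10 ≈ 15.38; points with larger z land
# closer to the origin, as expected for a perspective projection.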
| 30
|
'''simple docstring'''
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
_lowercase : str = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class __magic_name__ ( datasets.BuilderConfig):
UpperCamelCase__ = None
def lowerCamelCase ( UpperCAmelCase__ : "pyspark.sql.DataFrame" , UpperCAmelCase__ : List[int] , ) -> str:
import pyspark
def generate_fn():
lowercase_ : List[str] = df.select("""*""" , pyspark.sql.functions.spark_partition_id().alias("""part_id""" ) )
for partition_id in partition_order:
lowercase_ : int = df_with_partition_id.select("""*""" ).where(F'''part_id = {partition_id}''' ).drop("""part_id""" )
lowercase_ : Any = partition_df.collect()
lowercase_ : Dict = 0
for row in rows:
yield F'''{partition_id}_{row_id}''', row.asDict()
row_id += 1
return generate_fn
class __magic_name__ ( _BaseExamplesIterable):
def __init__( self : int , lowercase_ : "pyspark.sql.DataFrame" , lowercase_ : Optional[int]=None , ):
lowercase_ : Dict = df
lowercase_ : Optional[Any] = partition_order or range(self.df.rdd.getNumPartitions() )
lowercase_ : Optional[Any] = _generate_iterable_examples(self.df , self.partition_order )
def __iter__( self : List[Any] ):
yield from self.generate_examples_fn()
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : np.random.Generator ):
lowercase_ : str = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(lowercase_ )
return SparkExamplesIterable(self.df , partition_order=lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : int , lowercase_ : int ):
lowercase_ : str = self.split_shard_indices_by_worker(lowercase_ , lowercase_ )
return SparkExamplesIterable(self.df , partition_order=lowercase_ )
@property
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
return len(self.partition_order )
class __magic_name__ ( datasets.DatasetBuilder):
UpperCamelCase__ = SparkConfig
def __init__( self : Tuple , lowercase_ : "pyspark.sql.DataFrame" , lowercase_ : str = None , lowercase_ : str = None , **lowercase_ : str , ):
import pyspark
lowercase_ : str = pyspark.sql.SparkSession.builder.getOrCreate()
lowercase_ : Optional[int] = df
lowercase_ : List[str] = working_dir
super().__init__(
cache_dir=lowercase_ , config_name=str(self.df.semanticHash() ) , **lowercase_ , )
def SCREAMING_SNAKE_CASE_ ( self : str ):
# Returns the path of the created file.
def create_cache_and_write_probe(lowercase_ : str ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir , exist_ok=lowercase_ )
probe_file = os.path.join(self._cache_dir, "fs_test" + uuid.uuid4().hex)
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(lowercase_ , """a""" )
return [probe_file]
if self._spark.conf.get("""spark.master""" , """""" ).startswith("""local""" ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
lowercase_ : str = (
self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(lowercase_ ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
"""When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir""" )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
return datasets.DatasetInfo(features=self.config.features )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : datasets.download.download_manager.DownloadManager ):
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Union[str, Any] ):
import pyspark
def get_arrow_batch_size(lowercase_ : Any ):
for batch in it:
yield pa.RecordBatch.from_pydict({"""batch_bytes""": [batch.nbytes]} )
lowercase_ : Union[str, Any] = self.df.count()
lowercase_ : Union[str, Any] = df_num_rows if df_num_rows <= 100 else 100
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
lowercase_ : Any = (
self.df.limit(lowercase_ )
.repartition(1 )
.mapInArrow(lowercase_ , """batch_bytes: long""" )
.agg(pyspark.sql.functions.sum("""batch_bytes""" ).alias("""sample_bytes""" ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
lowercase_ : List[Any] = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
lowercase_ : Any = min(lowercase_ , int(approx_total_size / max_shard_size ) )
lowercase_ : Any = self.df.repartition(lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Any , lowercase_ : str , lowercase_ : str , lowercase_ : int , ):
import pyspark
lowercase_ : Any = ParquetWriter if file_format == """parquet""" else ArrowWriter
lowercase_ : Dict = os.path.join(self._working_dir , os.path.basename(lowercase_ ) ) if self._working_dir else fpath
lowercase_ : Optional[Any] = file_format == """parquet"""
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
lowercase_ : Tuple = self.config.features
lowercase_ : Any = self._writer_batch_size
lowercase_ : List[str] = self._fs.storage_options
def write_arrow(lowercase_ : str ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
lowercase_ : List[str] = pyspark.TaskContext().taskAttemptId()
lowercase_ : Dict = next(lowercase_ , lowercase_ )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )
lowercase_ : int = 0
lowercase_ : List[Any] = writer_class(
features=lowercase_ , path=working_fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , writer_batch_size=lowercase_ , storage_options=lowercase_ , embed_local_files=lowercase_ , )
lowercase_ : Optional[Any] = pa.Table.from_batches([first_batch] )
writer.write_table(lowercase_ )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
lowercase_ , lowercase_ : Dict = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )
shard_id += 1
lowercase_ : Any = writer_class(
features=writer._features , path=working_fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , writer_batch_size=lowercase_ , storage_options=lowercase_ , embed_local_files=lowercase_ , )
lowercase_ : List[str] = pa.Table.from_batches([batch] )
writer.write_table(lowercase_ )
if writer._num_bytes > 0:
lowercase_ , lowercase_ : str = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(lowercase_ ) ):
lowercase_ : Optional[Any] = os.path.join(os.path.dirname(lowercase_ ) , os.path.basename(lowercase_ ) )
shutil.move(lowercase_ , lowercase_ )
lowercase_ : Union[str, Any] = (
self.df.mapInArrow(lowercase_ , """task_id: long, num_examples: long, num_bytes: long""" )
.groupBy("""task_id""" )
.agg(
pyspark.sql.functions.sum("""num_examples""" ).alias("""total_num_examples""" ) , pyspark.sql.functions.sum("""num_bytes""" ).alias("""total_num_bytes""" ) , pyspark.sql.functions.count("""num_bytes""" ).alias("""num_shards""" ) , pyspark.sql.functions.collect_list("""num_examples""" ).alias("""shard_lengths""" ) , )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : "datasets.SplitGenerator" , lowercase_ : str = "arrow" , lowercase_ : Optional[Union[str, int]] = None , lowercase_ : Optional[int] = None , **lowercase_ : List[str] , ):
self._validate_cache_dir()
lowercase_ : int = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(lowercase_ )
lowercase_ : Tuple = not is_remote_filesystem(self._fs )
lowercase_ : int = os.path.join if is_local else posixpath.join
lowercase_ : Dict = """-TTTTT-SSSSS-of-NNNNN"""
lowercase_ : Dict = f'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}'''
lowercase_ : Optional[int] = path_join(self._output_dir , lowercase_ )
lowercase_ : Any = 0
lowercase_ : Tuple = 0
lowercase_ : int = 0
lowercase_ : Dict = []
lowercase_ : Union[str, Any] = []
for task_id, content in self._prepare_split_single(lowercase_ , lowercase_ , lowercase_ ):
num_examples, num_bytes, num_shards, shard_lengths = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(lowercase_ )
lowercase_ : List[str] = total_num_examples
lowercase_ : int = total_num_bytes
# should rename everything at the end
logger.debug(f'''Renaming {total_shards} shards.''' )
if total_shards > 1:
lowercase_ : Tuple = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
lowercase_ : Dict = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
lowercase_ : int , lowercase_ : int , lowercase_ : int , ):
rename(
lowercase_ , fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , fpath.replace("""TTTTT-SSSSS""" , f'''{global_shard_id:05d}''' ).replace("""NNNNN""" , f'''{total_shards:05d}''' ) , )
lowercase_ : Union[str, Any] = []
lowercase_ : Tuple = 0
for i in range(len(lowercase_ ) ):
lowercase_ , lowercase_ : List[Any] = task_id_and_num_shards[i]
for shard_id in range(lowercase_ ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(lowercase_ , len(lowercase_ ) ).map(lambda lowercase_ : _rename_shard(*lowercase_ ) ).collect()
else:
# don't use any pattern
lowercase_ : List[str] = 0
lowercase_ : Optional[Any] = task_id_and_num_shards[0][0]
self._rename(
fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , fpath.replace(lowercase_ , """""" ) , )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : "datasets.SplitGenerator" , ):
return SparkExamplesIterable(self.df )
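# Usage sketch (added for illustration; not part of this module). The public
# entry point that drives this builder is Dataset.from_spark, assuming a
# datasets release that ships it (2.13+):
#
#     from pyspark.sql import SparkSession
#     from datasets import Dataset
#
#     spark = SparkSession.builder.getOrCreate()
#     df = spark.createDataFrame([("hello",), ("world",)], ["text"])
#     ds = Dataset.from_spark(df)  # materializes the DataFrame as an Arrow-backed Dataset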
| 30
| 1
|
'''simple docstring'''
import unittest
import numpy as np
def lowerCamelCase ( UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : np.ndarray | None = None , ) -> np.ndarray:
lowercase_ : List[Any] = np.shape(UpperCAmelCase__ )
lowercase_ : Dict = np.shape(UpperCAmelCase__ )
lowercase_ : int = np.shape(UpperCAmelCase__ )
if shape_a[0] != shape_b[0]:
lowercase_ : Optional[int] = (
"""Expected the same number of rows for A and B. """
F'''Instead found A of size {shape_a} and B of size {shape_b}'''
)
raise ValueError(UpperCAmelCase__ )
if shape_b[1] != shape_c[1]:
lowercase_ : Optional[Any] = (
"""Expected the same number of columns for B and C. """
F'''Instead found B of size {shape_b} and C of size {shape_c}'''
)
raise ValueError(UpperCAmelCase__ )
lowercase_ : Any = pseudo_inv
if a_inv is None:
try:
lowercase_ : List[str] = np.linalg.inv(UpperCAmelCase__ )
except np.linalg.LinAlgError:
raise ValueError(
"""Input matrix A is not invertible. Cannot compute Schur complement.""" )
return mat_c - mat_b.T @ a_inv @ mat_b
class __magic_name__ ( unittest.TestCase):
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
lowercase_ : Tuple = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
lowercase_ : int = np.array([[0, 3], [3, 0], [2, 3]] )
lowercase_ : Dict = np.array([[2, 1], [6, 3]] )
lowercase_ : Union[str, Any] = schur_complement(lowercase_ , lowercase_ , lowercase_ )
lowercase_ : List[Any] = np.block([[a, b], [b.T, c]] )
lowercase_ : Optional[int] = np.linalg.det(lowercase_ )
lowercase_ : int = np.linalg.det(lowercase_ )
lowercase_ : int = np.linalg.det(lowercase_ )
self.assertAlmostEqual(lowercase_ , det_a * det_s )
def SCREAMING_SNAKE_CASE_ ( self : str ):
lowercase_ : int = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
lowercase_ : Optional[Any] = np.array([[0, 3], [3, 0], [2, 3]] )
lowercase_ : Union[str, Any] = np.array([[2, 1], [6, 3]] )
with self.assertRaises(lowercase_ ):
schur_complement(lowercase_ , lowercase_ , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ : List[Any] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
lowercase_ : List[Any] = np.array([[0, 3], [3, 0], [2, 3]] )
lowercase_ : str = np.array([[2, 1, 3], [6, 3, 5]] )
with self.assertRaises(lowercase_ ):
schur_complement(lowercase_ , lowercase_ , lowercase_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
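# Worked example (illustrative): with a = [[2, 0], [0, 2]], b = identity and
# c = identity, the Schur complement is c - b.T @ inv(a) @ b = 0.5 * identity,
# so det([[a, b], [b.T, c]]) = det(a) * det(s) = 4 * 0.25 = 1, which is exactly
# the identity the first unit test above checks.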
| 30
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_lowercase : Dict = {
"configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Dict = ["BloomTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Dict = [
"BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
"BloomForCausalLM",
"BloomModel",
"BloomPreTrainedModel",
"BloomForSequenceClassification",
"BloomForTokenClassification",
"BloomForQuestionAnswering",
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
_lowercase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 30
| 1
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = '''philschmid/bart-large-cnn-samsum'''
UpperCamelCase__ = (
'''This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, '''
'''and returns a summary of the text.'''
)
UpperCamelCase__ = '''summarizer'''
UpperCamelCase__ = AutoTokenizer
UpperCamelCase__ = AutoModelForSeq2SeqLM
UpperCamelCase__ = ['''text''']
UpperCamelCase__ = ['''text''']
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : Any ):
return self.pre_processor(lowercase_ , return_tensors="""pt""" , truncation=lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : Tuple ):
return self.model.generate(**lowercase_ )[0]
def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : str ):
return self.pre_processor.decode(lowercase_ , skip_special_tokens=lowercase_ , clean_up_tokenization_spaces=lowercase_ )
| 30
|
'''simple docstring'''
_lowercase : int = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def main() -> None:
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")

    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)

    print(f"\n{mode.title()}ed message:")
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "encrypt")


def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "decrypt")


def translate_message(key: str, message: str, mode: str) -> str:
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])

            num %= len(LETTERS)

            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)
    return "".join(translated)
if __name__ == "__main__":
main()
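# Worked example (illustrative): encrypt_message("KEY", "HELLO") shifts each
# letter by the matching key letter (H+K, E+E, L+Y, L+K, O+E) and returns
# "RIJVS"; decrypt_message("KEY", "RIJVS") recovers "HELLO".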
| 30
| 1
|
'''simple docstring'''
def remove_digit(num: int) -> int:
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    else:
        num_str = str(abs(num))
        num_transpositions = [list(num_str) for char in range(len(num_str))]
        for index in range(len(num_str)):
            num_transpositions[index].pop(index)
        return max(
            int("".join(list(transposition))) for transposition in num_transpositions
        )
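# Worked example (illustrative): for 1234 the candidates after removing a single
# digit are 234, 134, 124 and 123, so remove_digit(1234) returns 234.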
if __name__ == "__main__":
__import__("doctest").testmod()
| 30
|
'''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class __magic_name__ ( unittest.TestCase):
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
lowercase_ : List[Any] = """ylacombe/bark-small"""
lowercase_ : List[str] = tempfile.mkdtemp()
lowercase_ : Tuple = """en_speaker_1"""
lowercase_ : Union[str, Any] = """This is a test string"""
lowercase_ : int = """speaker_embeddings_path.json"""
lowercase_ : Any = """speaker_embeddings"""
def SCREAMING_SNAKE_CASE_ ( self : Tuple , **lowercase_ : Optional[int] ):
return AutoTokenizer.from_pretrained(self.checkpoint , **lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : str ):
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
lowercase_ : Any = self.get_tokenizer()
lowercase_ : Optional[Any] = BarkProcessor(tokenizer=lowercase_ )
processor.save_pretrained(self.tmpdirname )
lowercase_ : Union[str, Any] = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
lowercase_ : Any = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
lowercase_ : List[str] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
lowercase_ : Optional[Any] = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
lowercase_ : Optional[int] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
lowercase_ : Optional[int] = 35
lowercase_ : int = 2
lowercase_ : Union[str, Any] = 8
lowercase_ : Union[str, Any] = {
"""semantic_prompt""": np.ones(lowercase_ ),
"""coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ),
"""fine_prompt""": np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
lowercase_ : str = processor(text=self.input_string , voice_preset=lowercase_ )
lowercase_ : Dict = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowercase_ , np.array([] ) ).tolist() )
# test loading voice preset from npz file
lowercase_ : Any = os.path.join(self.tmpdirname , """file.npz""" )
np.savez(lowercase_ , **lowercase_ )
lowercase_ : Optional[Any] = processor(text=self.input_string , voice_preset=lowercase_ )
lowercase_ : List[Any] = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowercase_ , np.array([] ) ).tolist() )
# test loading voice preset from the hub
lowercase_ : Union[str, Any] = processor(text=self.input_string , voice_preset=self.voice_preset )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
lowercase_ : List[str] = self.get_tokenizer()
lowercase_ : int = BarkProcessor(tokenizer=lowercase_ )
lowercase_ : Any = processor(text=self.input_string )
lowercase_ : List[str] = tokenizer(
self.input_string , padding="""max_length""" , max_length=256 , add_special_tokens=lowercase_ , return_attention_mask=lowercase_ , return_token_type_ids=lowercase_ , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 30
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_lowercase : Optional[Any] = {
"configuration_swiftformer": [
"SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SwiftFormerConfig",
"SwiftFormerOnnxConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Any = [
"SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"SwiftFormerForImageClassification",
"SwiftFormerModel",
"SwiftFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
_lowercase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 30
|
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=_UpperCAmelCase)
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = field(default='''image-classification''', metadata={'''include_in_asdict_even_if_is_default''': True})
UpperCamelCase__ = Features({'''image''': Image()})
UpperCamelCase__ = Features({'''labels''': ClassLabel})
UpperCamelCase__ = "image"
UpperCamelCase__ = "labels"
def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : str ):
if self.label_column not in features:
raise ValueError(f'''Column {self.label_column} is not present in features.''' )
if not isinstance(features[self.label_column] , lowercase_ ):
raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' )
lowercase_ : List[str] = copy.deepcopy(self )
lowercase_ : List[str] = self.label_schema.copy()
lowercase_ : List[Any] = features[self.label_column]
lowercase_ : Optional[Any] = label_schema
return task_template
@property
def SCREAMING_SNAKE_CASE_ ( self : int ):
return {
self.image_column: "image",
self.label_column: "labels",
}
| 30
| 1
|
'''simple docstring'''
def odd_even_transposition(arr: list) -> list:
    arr_size = len(arr)
    for _ in range(arr_size):
        for i in range(_ % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr
if __name__ == "__main__":
_lowercase : List[Any] = list(range(10, 0, -1))
print(f"""Original: {arr}. Sorted: {odd_even_transposition(arr)}""")
| 30
|
'''simple docstring'''
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def lowerCamelCase ( UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[Any] ) -> List[Any]:
lowercase_ : str = 1.5
lowercase_ : List[Any] = int(factor * num_class_images )
lowercase_ : int = ClipClient(
url="""https://knn.laion.ai/knn-service""" , indice_name="""laion_400m""" , num_images=UpperCAmelCase__ , aesthetic_weight=0.1 )
os.makedirs(F'''{class_data_dir}/images''' , exist_ok=UpperCAmelCase__ )
if len(list(Path(F'''{class_data_dir}/images''' ).iterdir() ) ) >= num_class_images:
return
while True:
lowercase_ : List[str] = client.query(text=UpperCAmelCase__ )
if len(UpperCAmelCase__ ) >= factor * num_class_images or num_images > 1e4:
break
else:
lowercase_ : List[str] = int(factor * num_images )
lowercase_ : List[str] = ClipClient(
url="""https://knn.laion.ai/knn-service""" , indice_name="""laion_400m""" , num_images=UpperCAmelCase__ , aesthetic_weight=0.1 , )
lowercase_ : List[str] = 0
lowercase_ : Dict = 0
lowercase_ : Tuple = tqdm(desc="""downloading real regularization images""" , total=UpperCAmelCase__ )
with open(F'''{class_data_dir}/caption.txt''' , """w""" ) as fa, open(F'''{class_data_dir}/urls.txt''' , """w""" ) as fa, open(
F'''{class_data_dir}/images.txt''' , """w""" ) as fa:
while total < num_class_images:
lowercase_ : str = class_images[count]
count += 1
try:
lowercase_ : Union[str, Any] = requests.get(images["""url"""] )
if img.status_code == 200:
lowercase_ : List[str] = Image.open(BytesIO(img.content ) )
with open(F'''{class_data_dir}/images/{total}.jpg''' , """wb""" ) as f:
f.write(img.content )
fa.write(images["""caption"""] + """\n""" )
fa.write(images["""url"""] + """\n""" )
fa.write(F'''{class_data_dir}/images/{total}.jpg''' + """\n""" )
total += 1
pbar.update(1 )
else:
continue
except Exception:
continue
return
def lowerCamelCase ( ) -> Optional[Any]:
lowercase_ : Any = argparse.ArgumentParser("""""" , add_help=UpperCAmelCase__ )
parser.add_argument("""--class_prompt""" , help="""text prompt to retrieve images""" , required=UpperCAmelCase__ , type=UpperCAmelCase__ )
parser.add_argument("""--class_data_dir""" , help="""path to save images""" , required=UpperCAmelCase__ , type=UpperCAmelCase__ )
parser.add_argument("""--num_class_images""" , help="""number of images to download""" , default=200 , type=UpperCAmelCase__ )
return parser.parse_args()
if __name__ == "__main__":
_lowercase : Dict = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
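# Example invocation (illustrative; the script filename is an assumption, the
# flags match the argparse definition above):
#     python retrieve_real_images.py \
#         --class_prompt "a photo of a dog" \
#         --class_data_dir ./real_reg/dog \
#         --num_class_images 200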
| 30
| 1
|
'''simple docstring'''
def cramers_rule_2x2(equation1: list[int], equation2: list[int]) -> tuple[float, float]:
    # Check if the input is valid
    if not len(equation1) == len(equation2) == 3:
        raise ValueError("Please enter a valid equation.")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        else:
            raise ValueError("No solution. (Inconsistent system)")
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
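# Worked example (illustrative): for 2x + 3y = 7 and x - y = 1, i.e.
# cramers_rule_2x2([2, 3, 7], [1, -1, 1]), the determinants are
# D = 2 * (-1) - 1 * 3 = -5, Dx = 7 * (-1) - 1 * 3 = -10 and
# Dy = 2 * 1 - 1 * 7 = -5, giving x = Dx / D = 2.0 and y = Dy / D = 1.0.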
| 30
|
'''simple docstring'''
from __future__ import annotations
def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    if start is None:
        start = 0
    if end is None:
        end = len(sequence) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    slowsort(sequence, start, end - 1)
if __name__ == "__main__":
from doctest import testmod
testmod()
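# Example (illustrative): slowsort mutates the list in place, e.g.
#     data = [4, 1, 3, 2]
#     slowsort(data)  # data is now [1, 2, 3, 4]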
| 30
| 1
|
'''simple docstring'''
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
_lowercase : List[Any] = logging.getLogger(__name__)
def lowerCamelCase ( UpperCAmelCase__ : int=2 , UpperCAmelCase__ : Optional[Any]=3 , UpperCAmelCase__ : Optional[Any]=16 , UpperCAmelCase__ : int = 10 , UpperCAmelCase__ : int = 2 ) -> Optional[Any]:
def get_dataset(UpperCAmelCase__ : Tuple ):
lowercase_ : Optional[Any] = torch.randn(batch_size * n_batches , 1 )
return TensorDataset(UpperCAmelCase__ , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )
lowercase_ : Any = get_dataset(UpperCAmelCase__ )
lowercase_ : str = get_dataset(UpperCAmelCase__ )
lowercase_ : Union[str, Any] = DataLoader(UpperCAmelCase__ , shuffle=UpperCAmelCase__ , batch_size=UpperCAmelCase__ , num_workers=4 )
lowercase_ : Optional[Any] = DataLoader(UpperCAmelCase__ , shuffle=UpperCAmelCase__ , batch_size=UpperCAmelCase__ , num_workers=4 )
return (train_dataloader, valid_dataloader)
def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[str]=None ) -> Dict:
lowercase_ : Optional[int] = []
for epoch in range(UpperCAmelCase__ ):
# Train quickly
model.train()
for batch in dataloader:
lowercase_ , lowercase_ : int = batch
lowercase_ : Tuple = model(UpperCAmelCase__ )
lowercase_ : Optional[Any] = torch.nn.functional.mse_loss(UpperCAmelCase__ , UpperCAmelCase__ )
accelerator.backward(UpperCAmelCase__ )
optimizer.step()
optimizer.zero_grad()
rands.append(random.random() ) # Introduce some randomness
if scheduler is not None:
scheduler.step()
return rands
class __magic_name__ ( nn.Module):
def __init__( self : Optional[Any] ):
super().__init__()
lowercase_ : List[Any] = nn.Parameter(torch.randn(1 ) )
lowercase_ : Tuple = nn.Parameter(torch.randn(1 ) )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : Dict ):
return x * self.a + self.b
class __magic_name__ ( unittest.TestCase):
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
lowercase_ : int = DummyModel()
lowercase_ : Optional[Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
lowercase_ , lowercase_ : Any = dummy_dataloaders()
lowercase_ : int = ProjectConfiguration(total_limit=1 , project_dir=lowercase_ , automatic_checkpoint_naming=lowercase_ )
# Train baseline
lowercase_ : List[Any] = Accelerator(project_config=lowercase_ )
lowercase_ , lowercase_ , lowercase_ , lowercase_ : List[str] = accelerator.prepare(
lowercase_ , lowercase_ , lowercase_ , lowercase_ )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def SCREAMING_SNAKE_CASE_ ( self : str ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
lowercase_ : List[str] = DummyModel()
lowercase_ : Any = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
lowercase_ , lowercase_ : Dict = dummy_dataloaders()
# Train baseline
lowercase_ : str = Accelerator()
lowercase_ , lowercase_ , lowercase_ , lowercase_ : Any = accelerator.prepare(
lowercase_ , lowercase_ , lowercase_ , lowercase_ )
# Save initial
lowercase_ : Any = os.path.join(lowercase_ , """initial""" )
accelerator.save_state(lowercase_ )
((lowercase_) , (lowercase_)) : Tuple = model.a.item(), model.b.item()
lowercase_ : Tuple = optimizer.state_dict()
lowercase_ : int = train(3 , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
((lowercase_) , (lowercase_)) : List[Any] = model.a.item(), model.b.item()
lowercase_ : Dict = optimizer.state_dict()
# Train partially
set_seed(42 )
lowercase_ : List[Any] = DummyModel()
lowercase_ : int = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
lowercase_ , lowercase_ : List[str] = dummy_dataloaders()
lowercase_ : Optional[int] = Accelerator()
lowercase_ , lowercase_ , lowercase_ , lowercase_ : Optional[Any] = accelerator.prepare(
lowercase_ , lowercase_ , lowercase_ , lowercase_ )
accelerator.load_state(lowercase_ )
((lowercase_) , (lowercase_)) : List[Any] = model.a.item(), model.b.item()
lowercase_ : Optional[int] = optimizer.state_dict()
self.assertEqual(lowercase_ , lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
lowercase_ : Optional[int] = train(2 , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
# Save everything
lowercase_ : str = os.path.join(lowercase_ , """checkpoint""" )
accelerator.save_state(lowercase_ )
# Load everything back in and make sure all states work
accelerator.load_state(lowercase_ )
test_rands += train(1 , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
((lowercase_) , (lowercase_)) : Tuple = model.a.item(), model.b.item()
lowercase_ : Tuple = optimizer.state_dict()
self.assertEqual(lowercase_ , lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : int ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
lowercase_ : Any = DummyModel()
lowercase_ : int = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
lowercase_ , lowercase_ : Dict = dummy_dataloaders()
lowercase_ : Tuple = ProjectConfiguration(automatic_checkpoint_naming=lowercase_ )
# Train baseline
lowercase_ : List[Any] = Accelerator(project_dir=lowercase_ , project_config=lowercase_ )
lowercase_ , lowercase_ , lowercase_ , lowercase_ : str = accelerator.prepare(
lowercase_ , lowercase_ , lowercase_ , lowercase_ )
# Save initial
accelerator.save_state()
((lowercase_) , (lowercase_)) : str = model.a.item(), model.b.item()
lowercase_ : str = optimizer.state_dict()
lowercase_ : Optional[Any] = train(3 , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
((lowercase_) , (lowercase_)) : str = model.a.item(), model.b.item()
lowercase_ : List[Any] = optimizer.state_dict()
# Train partially
set_seed(42 )
lowercase_ : Union[str, Any] = DummyModel()
lowercase_ : Union[str, Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
lowercase_ , lowercase_ : Union[str, Any] = dummy_dataloaders()
lowercase_ : Dict = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=lowercase_ )
lowercase_ : int = Accelerator(project_dir=lowercase_ , project_config=lowercase_ )
lowercase_ , lowercase_ , lowercase_ , lowercase_ : Optional[int] = accelerator.prepare(
lowercase_ , lowercase_ , lowercase_ , lowercase_ )
accelerator.load_state(os.path.join(lowercase_ , """checkpoints""" , """checkpoint_0""" ) )
((lowercase_) , (lowercase_)) : List[Any] = model.a.item(), model.b.item()
lowercase_ : List[str] = optimizer.state_dict()
self.assertEqual(lowercase_ , lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
lowercase_ : Optional[int] = train(2 , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(lowercase_ , """checkpoints""" , """checkpoint_1""" ) )
test_rands += train(1 , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
((lowercase_) , (lowercase_)) : Tuple = model.a.item(), model.b.item()
lowercase_ : List[str] = optimizer.state_dict()
self.assertEqual(lowercase_ , lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ : List[str] = torch.tensor([1, 2, 3] )
lowercase_ : Any = torch.tensor([2, 3, 4] )
lowercase_ : Tuple = DummyModel()
lowercase_ : str = torch.optim.Adam(net.parameters() )
lowercase_ : Optional[Any] = Accelerator()
with self.assertRaises(lowercase_ ) as ve:
accelerator.register_for_checkpointing(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
lowercase_ : Optional[int] = str(ve.exception )
self.assertTrue("""Item at index 0""" in message )
self.assertTrue("""Item at index 1""" in message )
self.assertFalse("""Item at index 2""" in message )
self.assertFalse("""Item at index 3""" in message )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
lowercase_ : Any = DummyModel()
lowercase_ : Union[str, Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
lowercase_ : int = torch.optim.lr_scheduler.StepLR(lowercase_ , step_size=1 , gamma=0.99 )
lowercase_ , lowercase_ : List[Any] = dummy_dataloaders()
lowercase_ : List[str] = ProjectConfiguration(automatic_checkpoint_naming=lowercase_ )
# Train baseline
lowercase_ : Union[str, Any] = Accelerator(project_dir=lowercase_ , project_config=lowercase_ )
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ : Tuple = accelerator.prepare(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
# Save initial
accelerator.save_state()
lowercase_ : Dict = scheduler.state_dict()
train(3 , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
self.assertNotEqual(lowercase_ , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(lowercase_ , """checkpoints""" , """checkpoint_0""" ) )
self.assertEqual(lowercase_ , scheduler.state_dict() )
def SCREAMING_SNAKE_CASE_ ( self : int ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
lowercase_ : Dict = DummyModel()
lowercase_ : Optional[int] = ProjectConfiguration(automatic_checkpoint_naming=lowercase_ , total_limit=2 )
# Train baseline
lowercase_ : Optional[Any] = Accelerator(project_dir=lowercase_ , project_config=lowercase_ )
lowercase_ : str = accelerator.prepare(lowercase_ )
# Save 3 states:
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(lowercase_ , """checkpoints""" , """checkpoint_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(lowercase_ , """checkpoints""" , """checkpoint_9""" ) ) )
self.assertTrue(os.path.exists(os.path.join(lowercase_ , """checkpoints""" , """checkpoint_10""" ) ) )
@require_cuda
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
lowercase_ : List[str] = ["""torchrun""", f'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
execute_subprocess_async(lowercase_ , env=os.environ.copy() )
if __name__ == "__main__":
_lowercase : Optional[int] = "/tmp/accelerate/state_checkpointing"
_lowercase : Dict = DummyModel()
_lowercase : List[Any] = torch.optim.Adam(params=model.parameters(), lr=1e-3)
_lowercase : Any = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.9_9)
_lowercase , _lowercase : Any = dummy_dataloaders()
_lowercase : Union[str, Any] = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
_lowercase : Union[str, Any] = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="no")
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase : Union[str, Any] = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
_lowercase , _lowercase : str = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
# Check that the initial optimizer state is loaded on the GPU
for group in optimizer.param_groups:
_lowercase : Optional[int] = group["params"][0].device
break
assert param_device.type == accelerator.device.type
_lowercase : Union[str, Any] = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="cpu")
for group in optimizer.param_groups:
_lowercase : Optional[Any] = group["params"][0].device
break
assert (
param_device.type == torch.device("cpu").type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="on_device")
for group in optimizer.param_groups:
_lowercase : Union[str, Any] = group["params"][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match="Unsupported optimizer map location passed"):
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="invalid")
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
| 30
|
'''simple docstring'''
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
_lowercase : Optional[Any] = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
_lowercase : Dict = parser.parse_args()
_lowercase : Dict = "cpu"
_lowercase : str = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"
_lowercase : Any = "path-to-your-trained-model"
_lowercase : str = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
_lowercase : Optional[int] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
_lowercase : Any = pipe.to(device)
# to channels last
_lowercase : Union[str, Any] = pipe.unet.to(memory_format=torch.channels_last)
_lowercase : List[Any] = pipe.vae.to(memory_format=torch.channels_last)
_lowercase : Union[str, Any] = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
_lowercase : Tuple = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
_lowercase : int = torch.randn(2, 4, 64, 64)
_lowercase : int = torch.rand(1) * 999
_lowercase : Union[str, Any] = torch.randn(2, 77, 768)
_lowercase : Optional[Any] = (sample, timestep, encoder_hidden_status)
try:
_lowercase : int = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
_lowercase : Optional[int] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
_lowercase : List[Any] = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
_lowercase : str = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
_lowercase : int = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)
# compute
_lowercase : int = 666
_lowercase : Any = torch.Generator(device).manual_seed(seed)
_lowercase : int = {"generator": generator}
if args.steps is not None:
_lowercase : Optional[int] = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
_lowercase : List[Any] = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save("generated.png")
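# Example invocation (illustrative; the script filename is an assumption and
# model_id above must point at your own trained model before running):
#     python stable_diffusion_ipex_inference.py --dpm --steps 20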
| 30
| 1
|
'''simple docstring'''
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
_lowercase : Dict = {
"169M": 12,
"430M": 24,
"1B5": 24,
"3B": 32,
"7B": 32,
"14B": 40,
}
_lowercase : Tuple = {
"169M": 768,
"430M": 1024,
"1B5": 2048,
"3B": 2560,
"7B": 4096,
"14B": 5120,
}
def lowerCamelCase ( UpperCAmelCase__ : Any ) -> List[str]:
lowercase_ : int = list(state_dict.keys() )
for name in state_dict_keys:
lowercase_ : List[str] = state_dict.pop(UpperCAmelCase__ )
# emb -> embedding
if name.startswith("""emb.""" ):
lowercase_ : str = name.replace("""emb.""" , """embeddings.""" )
# ln_0 -> pre_ln (only present at block 0)
if name.startswith("""blocks.0.ln0""" ):
lowercase_ : int = name.replace("""blocks.0.ln0""" , """blocks.0.pre_ln""" )
# att -> attention
lowercase_ : Union[str, Any] = re.sub(R"""blocks\.(\d+)\.att""" , R"""blocks.\1.attention""" , UpperCAmelCase__ )
# ffn -> feed_forward
lowercase_ : Optional[Any] = re.sub(R"""blocks\.(\d+)\.ffn""" , R"""blocks.\1.feed_forward""" , UpperCAmelCase__ )
# time_mix_k -> time_mix_key and reshape
if name.endswith(""".time_mix_k""" ):
lowercase_ : Any = name.replace(""".time_mix_k""" , """.time_mix_key""" )
# time_mix_v -> time_mix_value and reshape
if name.endswith(""".time_mix_v""" ):
lowercase_ : int = name.replace(""".time_mix_v""" , """.time_mix_value""" )
# time_mix_r -> time_mix_key and reshape
if name.endswith(""".time_mix_r""" ):
lowercase_ : Any = name.replace(""".time_mix_r""" , """.time_mix_receptance""" )
if name != "head.weight":
lowercase_ : int = """rwkv.""" + name
lowercase_ : Any = weight
return state_dict
def lowerCamelCase ( UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[Any]=None , UpperCAmelCase__ : List[str]=None , UpperCAmelCase__ : str=False , UpperCAmelCase__ : Optional[Any]=None ) -> Any:
# 1. If possible, build the tokenizer.
if tokenizer_file is None:
print("""No `--tokenizer_file` provided, we will use the default tokenizer.""" )
lowercase_ : Dict = 50277
lowercase_ : Optional[int] = AutoTokenizer.from_pretrained("""EleutherAI/gpt-neox-20b""" )
else:
lowercase_ : Union[str, Any] = PreTrainedTokenizerFast(tokenizer_file=UpperCAmelCase__ )
lowercase_ : str = len(UpperCAmelCase__ )
tokenizer.save_pretrained(UpperCAmelCase__ )
# 2. Build the config
lowercase_ : Optional[Any] = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
if size is None:
# Try to infer size from the checkpoint name
for candidate in possible_sizes:
if candidate in checkpoint_file:
lowercase_ : List[str] = candidate
break
if size is None:
raise ValueError("""Could not infer the size, please provide it with the `--size` argument.""" )
if size not in possible_sizes:
raise ValueError(F'''`size` should be one of {possible_sizes}, got {size}.''' )
lowercase_ : Dict = RwkvConfig(
vocab_size=UpperCAmelCase__ , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , )
config.save_pretrained(UpperCAmelCase__ )
# 3. Download model file then convert state_dict
lowercase_ : Optional[Any] = hf_hub_download(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase_ : List[Any] = torch.load(UpperCAmelCase__ , map_location="""cpu""" )
lowercase_ : Dict = convert_state_dict(UpperCAmelCase__ )
# 4. Split in shards and save
lowercase_ , lowercase_ : Dict = shard_checkpoint(UpperCAmelCase__ )
for shard_file, shard in shards.items():
torch.save(UpperCAmelCase__ , os.path.join(UpperCAmelCase__ , UpperCAmelCase__ ) )
if index is not None:
lowercase_ : Dict = os.path.join(UpperCAmelCase__ , UpperCAmelCase__ )
# Save the index as well
with open(UpperCAmelCase__ , """w""" , encoding="""utf-8""" ) as f:
lowercase_ : Optional[Any] = json.dumps(UpperCAmelCase__ , indent=2 , sort_keys=UpperCAmelCase__ ) + """\n"""
f.write(UpperCAmelCase__ )
# 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
print(
"""Cleaning up shards. This may raise an OOM error; if it does, don't worry, you have still converted the model.""" )
lowercase_ : Optional[Any] = list(shards.keys() )
del state_dict
del shards
gc.collect()
for shard_file in shard_files:
lowercase_ : Optional[Any] = torch.load(os.path.join(UpperCAmelCase__ , UpperCAmelCase__ ) )
torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(UpperCAmelCase__ , UpperCAmelCase__ ) )
del state_dict
gc.collect()
if push_to_hub:
if model_name is None:
raise ValueError("""Please provide a `model_name` to push the model to the Hub.""" )
lowercase_ : Dict = AutoModelForCausalLM.from_pretrained(UpperCAmelCase__ )
model.push_to_hub(UpperCAmelCase__ , max_shard_size="""2GB""" )
tokenizer.push_to_hub(UpperCAmelCase__ )
if __name__ == "__main__":
_lowercase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--repo_id", default=None, type=str, required=True, help="Repo ID from which to pull the checkpoint."
)
parser.add_argument(
"--checkpoint_file", default=None, type=str, required=True, help="Name of the checkpoint file in the repo."
)
parser.add_argument(
"--output_dir", default=None, type=str, required=True, help="Where to save the converted model."
)
parser.add_argument(
"--tokenizer_file",
default=None,
type=str,
help="Path to the tokenizer file to use (if not provided, only the model is converted).",
)
parser.add_argument(
"--size",
default=None,
type=str,
help="Size of the model. Will be inferred from the `checkpoint_file` if not passed.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Push to the Hub the converted model.",
)
parser.add_argument(
"--model_name",
default=None,
type=str,
help="Name of the pushed model on the Hub, including the username / organization.",
)
_lowercase : List[str] = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
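# Example invocation (illustrative; the repo, checkpoint file and output path
# are placeholders):
#     python convert_rwkv_checkpoint_to_hf.py \
#         --repo_id <rwkv-repo-on-the-hub> \
#         --checkpoint_file <checkpoint>.pth \
#         --output_dir ./converted-rwkv \
#         --size 169M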
| 30
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_lowercase : Optional[Any] = {
"configuration_swiftformer": [
"SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SwiftFormerConfig",
"SwiftFormerOnnxConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Any = [
"SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"SwiftFormerForImageClassification",
"SwiftFormerModel",
"SwiftFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
_lowercase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 30
| 1
|
'''simple docstring'''
_lowercase : int = [0, 2, 4, 6, 8]
_lowercase : str = [1, 3, 5, 7, 9]
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : list[int] , UpperCAmelCase__ : int ) -> int:
if remaining_length == 0:
if digits[0] == 0 or digits[-1] == 0:
return 0
for i in range(length // 2 - 1 , -1 , -1 ):
remainder += digits[i] + digits[length - i - 1]
if remainder % 2 == 0:
return 0
remainder //= 10
return 1
if remaining_length == 1:
if remainder % 2 == 0:
return 0
lowercase_ : Optional[int] = 0
for digit in range(10 ):
lowercase_ : Dict = digit
result += reversible_numbers(
0 , (remainder + 2 * digit) // 10 , UpperCAmelCase__ , UpperCAmelCase__ )
return result
lowercase_ : Tuple = 0
for digita in range(10 ):
lowercase_ : Any = digita
if (remainder + digita) % 2 == 0:
lowercase_ : Tuple = ODD_DIGITS
else:
lowercase_ : List[str] = EVEN_DIGITS
for digita in other_parity_digits:
lowercase_ : Optional[Any] = digita
result += reversible_numbers(
remaining_length - 2 , (remainder + digita + digita) // 10 , UpperCAmelCase__ , UpperCAmelCase__ , )
return result
def lowerCamelCase ( UpperCAmelCase__ : int = 9 ) -> int:
lowercase_ : Tuple = 0
for length in range(1 , max_power + 1 ):
result += reversible_numbers(UpperCAmelCase__ , 0 , [0] * length , UpperCAmelCase__ )
return result
if __name__ == "__main__":
print(f"""{solution() = }""")
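# Note (illustrative): this solves Project Euler problem 145; the problem
# statement lists 120 reversible numbers below one thousand, so calling the
# solver above with max_power=3 should return 120 as a quick sanity check.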
| 30
|
'''simple docstring'''
import unittest
import numpy as np
def schur_complement(
    mat_a: np.ndarray,
    mat_b: np.ndarray,
    mat_c: np.ndarray,
    pseudo_inv: np.ndarray | None = None,
) -> np.ndarray:
    """Return the Schur complement S = C - B.T @ A^-1 @ B of the block matrix
    [[A, B], [B.T, C]]; it satisfies det([[A, B], [B.T, C]]) = det(A) * det(S)."""
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)
    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)
    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)
    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError(
                "Input matrix A is not invertible. Cannot compute Schur complement."
            )
    return mat_c - mat_b.T @ a_inv @ mat_b


class TestSchurComplement(unittest.TestCase):
    def test_schur_complement(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        s = schur_complement(a, b, c)
        block_matrix = np.block([[a, b], [b.T, c]])
        det_x = np.linalg.det(block_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)
        self.assertAlmostEqual(det_x, det_a * det_s)

    def test_improper_a_b_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        with self.assertRaises(ValueError):
            schur_complement(a, b, c)

    def test_improper_b_c_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])
        with self.assertRaises(ValueError):
            schur_complement(a, b, c)
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
'''simple docstring'''
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    """Iterate z -> z^2 + c for c = x + y*i and return the normalized step at
    which the orbit escapes, or 1.0 if it stays bounded for max_step steps."""
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()
    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x, figure_y, max_step)
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
'''simple docstring'''
_lowercase : int = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
def lowerCamelCase ( UpperCAmelCase__ : bytes ) -> bytes:
# Make sure the supplied data is a bytes-like object
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
lowercase_ : Union[str, Any] = F'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
raise TypeError(UpperCAmelCase__ )
lowercase_ : Dict = """""".join(bin(UpperCAmelCase__ )[2:].zfill(8 ) for byte in data )
lowercase_ : Union[str, Any] = len(UpperCAmelCase__ ) % 6 != 0
if padding_needed:
# The padding that will be added later
lowercase_ : List[Any] = b"""=""" * ((6 - len(UpperCAmelCase__ ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(UpperCAmelCase__ ) % 6)
else:
lowercase_ : Union[str, Any] = b""""""
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(UpperCAmelCase__ ) , 6 ) ).encode()
+ padding
)
def lowerCamelCase ( UpperCAmelCase__ : str ) -> bytes:
# Make sure encoded_data is either a string or a bytes-like object
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) and not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
lowercase_ : List[str] = (
"""argument should be a bytes-like object or ASCII string, """
F'''not \'{encoded_data.__class__.__name__}\''''
)
raise TypeError(UpperCAmelCase__ )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
try:
lowercase_ : Optional[int] = encoded_data.decode("""utf-8""" )
except UnicodeDecodeError:
raise ValueError("""base64 encoded data should only contain ASCII characters""" )
lowercase_ : Any = encoded_data.count("""=""" )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(UpperCAmelCase__ ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
lowercase_ : Optional[int] = encoded_data[:-padding]
lowercase_ : Any = """""".join(
bin(B64_CHARSET.index(UpperCAmelCase__ ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
lowercase_ : int = """""".join(
bin(B64_CHARSET.index(UpperCAmelCase__ ) )[2:].zfill(6 ) for char in encoded_data )
lowercase_ : Optional[int] = [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(UpperCAmelCase__ ) , 8 )
]
return bytes(UpperCAmelCase__ )
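# Quick round-trip check (illustrative comment only):
#   base64_encode(b"Hello World!")       -> b"SGVsbG8gV29ybGQh"
#   base64_decode(b"SGVsbG8gV29ybGQh")   -> b"Hello World!"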
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
"configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_longt5"] = [
"LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST",
"LongT5EncoderModel",
"LongT5ForConditionalGeneration",
"LongT5Model",
"LongT5PreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_longt5"] = [
"FlaxLongT5ForConditionalGeneration",
"FlaxLongT5Model",
"FlaxLongT5PreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
import sys
_lowercase : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import argparse
_lowercase : Optional[int] = "docs/source/_static/js/custom.js"
def lowerCamelCase ( UpperCAmelCase__ : Tuple ) -> Dict:
with open(UpperCAmelCase__ , encoding="""utf-8""" , newline="""\n""" ) as f:
lowercase_ : Optional[int] = f.readlines()
lowercase_ : Tuple = 0
# First let's put the right version
while not lines[index].startswith("""const stableVersion =""" ):
index += 1
lowercase_ : Optional[Any] = F'''const stableVersion = "v{version}"\n'''
# Then update the dictionary
while not lines[index].startswith("""const versionMapping = {""" ):
index += 1
# We go until the end
while not lines[index].startswith("""}""" ):
index += 1
# We add the new version at the end
lines[index - 1] += F''' "v{version}": "v{version}",\n'''
with open(UpperCAmelCase__ , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(UpperCAmelCase__ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--version", help="Release version.")
    args = parser.parse_args()
update_custom_js(args.version)
'''simple docstring'''
from __future__ import annotations
def resistor_parallel(resistors: list[float]) -> float:
    """Equivalent resistance of resistors connected in parallel:
    R_eq = 1 / (1/R1 + 1/R2 + ... + 1/Rn)."""
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    """Equivalent resistance of resistors connected in series:
    R_eq = R1 + R2 + ... + Rn."""
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        index += 1
    return sum_r
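# Worked example (illustrative): for resistors of 2, 4 and 4 ohms,
#   resistor_parallel([2, 4, 4]) -> 1.0   since 1 / (1/2 + 1/4 + 1/4) = 1
#   resistor_series([2, 4, 4])   -> 10.0  since 2 + 4 + 4 = 10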
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class __magic_name__ :
def __init__( self : Tuple , lowercase_ : Tuple , lowercase_ : Any=3 , lowercase_ : int=32 , lowercase_ : str=3 , lowercase_ : int=10 , lowercase_ : Any=[8, 16, 32, 64] , lowercase_ : Tuple=[1, 1, 2, 1] , lowercase_ : Any=True , lowercase_ : int=True , lowercase_ : Any="relu" , lowercase_ : List[Any]=3 , lowercase_ : Tuple=None , lowercase_ : Union[str, Any]=["stage2", "stage3", "stage4"] , lowercase_ : Optional[int]=[2, 3, 4] , lowercase_ : List[str]=1 , ):
lowercase_ : Any = parent
lowercase_ : str = batch_size
lowercase_ : Any = image_size
lowercase_ : Optional[Any] = num_channels
lowercase_ : Any = embeddings_size
lowercase_ : Union[str, Any] = hidden_sizes
lowercase_ : Any = depths
lowercase_ : Dict = is_training
lowercase_ : Tuple = use_labels
lowercase_ : str = hidden_act
lowercase_ : Optional[Any] = num_labels
lowercase_ : Tuple = scope
lowercase_ : Any = len(lowercase_ )
lowercase_ : Optional[Any] = out_features
lowercase_ : Tuple = out_indices
lowercase_ : str = num_groups
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
lowercase_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase_ : List[Any] = None
if self.use_labels:
lowercase_ : List[str] = ids_tensor([self.batch_size] , self.num_labels )
lowercase_ : int = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : int , lowercase_ : List[str] , lowercase_ : List[str] ):
lowercase_ : Optional[int] = BitModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ : List[Any] = model(lowercase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : Tuple , lowercase_ : Optional[int] , lowercase_ : List[Any] ):
lowercase_ : Union[str, Any] = self.num_labels
lowercase_ : Tuple = BitForImageClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ : Any = model(lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : List[Any] , lowercase_ : Optional[int] ):
lowercase_ : Any = BitBackbone(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ : Dict = model(lowercase_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
lowercase_ : List[str] = None
lowercase_ : Dict = BitBackbone(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ : Tuple = model(lowercase_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ : Optional[int] = self.prepare_config_and_inputs()
lowercase_ , lowercase_ , lowercase_ : Optional[Any] = config_and_inputs
lowercase_ : Any = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __magic_name__ ( _UpperCAmelCase, _UpperCAmelCase, unittest.TestCase):
UpperCamelCase__ = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
UpperCamelCase__ = (
{'''feature-extraction''': BitModel, '''image-classification''': BitForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
def SCREAMING_SNAKE_CASE_ ( self : str ):
lowercase_ : int = BitModelTester(self )
lowercase_ : Optional[int] = ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
return
@unittest.skip(reason="""Bit does not output attentions""" )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
pass
@unittest.skip(reason="""Bit does not use inputs_embeds""" )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
pass
@unittest.skip(reason="""Bit does not support input and output embeddings""" )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
pass
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
lowercase_ , lowercase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : Optional[Any] = model_class(lowercase_ )
lowercase_ : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ : Union[str, Any] = [*signature.parameters.keys()]
lowercase_ : Optional[int] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
lowercase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ , lowercase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : List[Any] = model_class(config=lowercase_ )
for name, module in model.named_modules():
if isinstance(lowercase_ , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
def check_hidden_states_output(lowercase_ : Optional[int] , lowercase_ : int , lowercase_ : int ):
lowercase_ : Optional[Any] = model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
with torch.no_grad():
lowercase_ : List[Any] = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
lowercase_ : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowercase_ : Optional[int] = self.model_tester.num_stages
self.assertEqual(len(lowercase_ ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowercase_ , lowercase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ : Dict = ["""preactivation""", """bottleneck"""]
for model_class in self.all_model_classes:
for layer_type in layers_type:
lowercase_ : Union[str, Any] = layer_type
lowercase_ : Optional[Any] = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase_ : Union[str, Any] = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
@unittest.skip(reason="""Bit does not use feedforward chunking""" )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
pass
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
lowercase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase_ )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ : List[str] = BitModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
def lowerCamelCase ( ) -> Optional[Any]:
lowercase_ : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class __magic_name__ ( unittest.TestCase):
@cached_property
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def SCREAMING_SNAKE_CASE_ ( self : int ):
lowercase_ : List[str] = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(lowercase_ )
lowercase_ : int = self.default_image_processor
lowercase_ : List[Any] = prepare_img()
lowercase_ : Dict = image_processor(images=lowercase_ , return_tensors="""pt""" ).to(lowercase_ )
# forward pass
with torch.no_grad():
lowercase_ : str = model(**lowercase_ )
# verify the logits
lowercase_ : Optional[int] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowercase_ )
lowercase_ : Union[str, Any] = torch.tensor([[-0.65_26, -0.52_63, -1.43_98]] ).to(lowercase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase_ , atol=1E-4 ) )
@require_torch
class __magic_name__ ( _UpperCAmelCase, unittest.TestCase):
UpperCamelCase__ = (BitBackbone,) if is_torch_available() else ()
UpperCamelCase__ = BitConfig
UpperCamelCase__ = False
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
lowercase_ : Union[str, Any] = BitModelTester(self )
'''simple docstring'''
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
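# The helpers below build standard second-order (biquad) IIR filters following the
# "Audio EQ Cookbook" formulas: in order, low-pass, high-pass, band-pass, all-pass,
# peak, low-shelf and high-shelf. Each one normalizes the frequency to
# w0 = tau * frequency / samplerate, derives alpha from the Q factor, and hands the
# resulting numerator/denominator coefficients to a second-order IIRFilter.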
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float = 1 / sqrt(2 ) ) -> IIRFilter:
lowercase_ : str = tau * frequency / samplerate
lowercase_ : Tuple = sin(UpperCAmelCase__ )
lowercase_ : int = cos(UpperCAmelCase__ )
lowercase_ : Any = _sin / (2 * q_factor)
lowercase_ : Dict = (1 - _cos) / 2
lowercase_ : Optional[int] = 1 - _cos
lowercase_ : Dict = 1 + alpha
lowercase_ : List[Any] = -2 * _cos
lowercase_ : Union[str, Any] = 1 - alpha
lowercase_ : List[Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float = 1 / sqrt(2 ) ) -> IIRFilter:
lowercase_ : str = tau * frequency / samplerate
lowercase_ : Optional[int] = sin(UpperCAmelCase__ )
lowercase_ : Dict = cos(UpperCAmelCase__ )
lowercase_ : Optional[int] = _sin / (2 * q_factor)
lowercase_ : Dict = (1 + _cos) / 2
lowercase_ : str = -1 - _cos
lowercase_ : Dict = 1 + alpha
lowercase_ : Optional[Any] = -2 * _cos
lowercase_ : List[Any] = 1 - alpha
lowercase_ : Union[str, Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float = 1 / sqrt(2 ) ) -> IIRFilter:
lowercase_ : int = tau * frequency / samplerate
lowercase_ : int = sin(UpperCAmelCase__ )
lowercase_ : Union[str, Any] = cos(UpperCAmelCase__ )
lowercase_ : str = _sin / (2 * q_factor)
lowercase_ : str = _sin / 2
lowercase_ : Any = 0
lowercase_ : Optional[Any] = -ba
lowercase_ : Dict = 1 + alpha
lowercase_ : Union[str, Any] = -2 * _cos
lowercase_ : Union[str, Any] = 1 - alpha
lowercase_ : Tuple = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float = 1 / sqrt(2 ) ) -> IIRFilter:
lowercase_ : List[str] = tau * frequency / samplerate
lowercase_ : Any = sin(UpperCAmelCase__ )
lowercase_ : List[Any] = cos(UpperCAmelCase__ )
lowercase_ : Optional[Any] = _sin / (2 * q_factor)
lowercase_ : Any = 1 - alpha
lowercase_ : Optional[Any] = -2 * _cos
lowercase_ : Optional[int] = 1 + alpha
lowercase_ : Dict = IIRFilter(2 )
filt.set_coefficients([ba, ba, ba] , [ba, ba, ba] )
return filt
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float , UpperCAmelCase__ : float = 1 / sqrt(2 ) , ) -> IIRFilter:
lowercase_ : Dict = tau * frequency / samplerate
lowercase_ : Tuple = sin(UpperCAmelCase__ )
lowercase_ : List[Any] = cos(UpperCAmelCase__ )
lowercase_ : List[Any] = _sin / (2 * q_factor)
lowercase_ : Any = 10 ** (gain_db / 40)
lowercase_ : List[str] = 1 + alpha * big_a
lowercase_ : List[Any] = -2 * _cos
lowercase_ : Dict = 1 - alpha * big_a
lowercase_ : str = 1 + alpha / big_a
lowercase_ : List[str] = -2 * _cos
lowercase_ : Tuple = 1 - alpha / big_a
lowercase_ : Any = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float , UpperCAmelCase__ : float = 1 / sqrt(2 ) , ) -> IIRFilter:
lowercase_ : Dict = tau * frequency / samplerate
lowercase_ : Union[str, Any] = sin(UpperCAmelCase__ )
lowercase_ : Any = cos(UpperCAmelCase__ )
lowercase_ : Any = _sin / (2 * q_factor)
lowercase_ : Any = 10 ** (gain_db / 40)
lowercase_ : Any = (big_a + 1) - (big_a - 1) * _cos
lowercase_ : int = (big_a + 1) + (big_a - 1) * _cos
lowercase_ : Tuple = (big_a - 1) - (big_a + 1) * _cos
lowercase_ : Optional[Any] = (big_a - 1) + (big_a + 1) * _cos
lowercase_ : int = 2 * sqrt(UpperCAmelCase__ ) * alpha
lowercase_ : Tuple = big_a * (pmc + aaa)
lowercase_ : List[str] = 2 * big_a * mpc
lowercase_ : Union[str, Any] = big_a * (pmc - aaa)
lowercase_ : Optional[int] = ppmc + aaa
lowercase_ : Optional[int] = -2 * pmpc
lowercase_ : Any = ppmc - aaa
lowercase_ : Optional[int] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float , UpperCAmelCase__ : float = 1 / sqrt(2 ) , ) -> IIRFilter:
lowercase_ : str = tau * frequency / samplerate
lowercase_ : int = sin(UpperCAmelCase__ )
lowercase_ : int = cos(UpperCAmelCase__ )
lowercase_ : Dict = _sin / (2 * q_factor)
lowercase_ : Union[str, Any] = 10 ** (gain_db / 40)
lowercase_ : Union[str, Any] = (big_a + 1) - (big_a - 1) * _cos
lowercase_ : Optional[int] = (big_a + 1) + (big_a - 1) * _cos
lowercase_ : Any = (big_a - 1) - (big_a + 1) * _cos
lowercase_ : str = (big_a - 1) + (big_a + 1) * _cos
lowercase_ : Optional[int] = 2 * sqrt(UpperCAmelCase__ ) * alpha
lowercase_ : Tuple = big_a * (ppmc + aaa)
lowercase_ : List[Any] = -2 * big_a * pmpc
lowercase_ : Optional[Any] = big_a * (ppmc - aaa)
lowercase_ : Optional[Any] = pmc + aaa
lowercase_ : int = 2 * mpc
lowercase_ : Tuple = pmc - aaa
lowercase_ : Union[str, Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
'''simple docstring'''
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
_lowercase : Optional[Any] = [
"Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the"
" final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe"
" depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.",
"The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal"
" accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's"
" founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the"
" body.",
"Amnesty International releases its annual report on the death penalty. The report catalogs the use of"
" state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the"
" world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital"
" punishment.",
]
_lowercase : List[Any] = [
"Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ."
" Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz"
" had informed his Lufthansa training school of an episode of severe depression, airline says .",
"Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ."
" Israel and the United States opposed the move, which could open the door to war crimes investigations against"
" Israelis .",
"Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to"
" death . Organization claims that governments around the world are using the threat of terrorism to advance"
" executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death"
" sentences up by 28% .",
]
def lowerCamelCase ( ) -> List[str]:
lowercase_ : str = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , bootstrap_aggregation=UpperCAmelCase__ , rouge_keys=["""rouge2""", """rougeL"""] )
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase_ : int = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , bootstrap_aggregation=UpperCAmelCase__ , rouge_keys=["""rouge2"""] )
assert (
pd.DataFrame(no_aggregation["""rouge2"""] ).fmeasure.mean()
== pd.DataFrame(no_aggregation_just_ra["""rouge2"""] ).fmeasure.mean()
)
def lowerCamelCase ( ) -> Optional[Any]:
lowercase_ : Tuple = """rougeLsum"""
lowercase_ : Optional[Any] = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=[k] )[k]
lowercase_ : Optional[Any] = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=[k] )[k]
assert score > score_no_sep
def lowerCamelCase ( ) -> List[Any]:
lowercase_ : Optional[int] = ["""rouge1""", """rouge2""", """rougeL"""]
lowercase_ : Tuple = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=UpperCAmelCase__ )
lowercase_ : Tuple = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=UpperCAmelCase__ )
assert score_sep == score_no_sep
def lowerCamelCase ( ) -> Optional[Any]:
lowercase_ : Union[str, Any] = [
"""Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.""",
"""Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .""",
]
lowercase_ : List[str] = [
"""Margot Frank, died in 1945, a month earlier than previously thought.""",
"""Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"""
""" the final seconds on board Flight 9525.""",
]
assert calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ ) == calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ )
def lowerCamelCase ( ) -> Union[str, Any]:
lowercase_ : Optional[Any] = [
"""\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" """
]
lowercase_ : List[Any] = [
""" Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."""
]
lowercase_ : Optional[int] = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , rouge_keys=["""rougeLsum"""] , newline_sep=UpperCAmelCase__ )["""rougeLsum"""]
lowercase_ : List[str] = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , rouge_keys=["""rougeLsum"""] )["""rougeLsum"""]
assert new_score > prev_score
def lowerCamelCase ( ) -> Tuple:
lowercase_ : Optional[int] = Path("""examples/seq2seq/test_data/wmt_en_ro""" )
lowercase_ : List[Any] = calculate_rouge_path(data_dir.joinpath("""test.source""" ) , data_dir.joinpath("""test.target""" ) )
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase_ : Union[str, Any] = calculate_rouge_path(
data_dir.joinpath("""test.source""" ) , data_dir.joinpath("""test.target""" ) , bootstrap_aggregation=UpperCAmelCase__ )
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
_lowercase : Tuple = logging.get_logger(__name__)
_lowercase : Tuple = {
"openai/imagegpt-small": "",
"openai/imagegpt-medium": "",
"openai/imagegpt-large": "",
}
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = '''imagegpt'''
UpperCamelCase__ = ['''past_key_values''']
UpperCamelCase__ = {
'''hidden_size''': '''n_embd''',
'''max_position_embeddings''': '''n_positions''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self : Optional[int] , lowercase_ : Any=512 + 1 , lowercase_ : int=32 * 32 , lowercase_ : Tuple=512 , lowercase_ : str=24 , lowercase_ : List[Any]=8 , lowercase_ : str=None , lowercase_ : int="quick_gelu" , lowercase_ : int=0.1 , lowercase_ : Tuple=0.1 , lowercase_ : Dict=0.1 , lowercase_ : Any=1E-5 , lowercase_ : List[Any]=0.02 , lowercase_ : Optional[int]=True , lowercase_ : str=True , lowercase_ : Any=False , lowercase_ : List[Any]=False , lowercase_ : Any=False , **lowercase_ : Any , ):
lowercase_ : List[Any] = vocab_size
lowercase_ : Optional[Any] = n_positions
lowercase_ : str = n_embd
lowercase_ : Tuple = n_layer
lowercase_ : List[str] = n_head
lowercase_ : Tuple = n_inner
lowercase_ : List[Any] = activation_function
lowercase_ : List[str] = resid_pdrop
lowercase_ : str = embd_pdrop
lowercase_ : str = attn_pdrop
lowercase_ : Any = layer_norm_epsilon
lowercase_ : str = initializer_range
lowercase_ : Any = scale_attn_weights
lowercase_ : Any = use_cache
lowercase_ : int = scale_attn_by_inverse_layer_idx
lowercase_ : str = reorder_and_upcast_attn
lowercase_ : Optional[Any] = tie_word_embeddings
super().__init__(tie_word_embeddings=lowercase_ , **lowercase_ )
class __magic_name__ ( _UpperCAmelCase):
@property
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
] )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : "FeatureExtractionMixin" , lowercase_ : int = 1 , lowercase_ : int = -1 , lowercase_ : bool = False , lowercase_ : Optional["TensorType"] = None , lowercase_ : int = 3 , lowercase_ : int = 32 , lowercase_ : int = 32 , ):
lowercase_ : Union[str, Any] = self._generate_dummy_images(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
lowercase_ : Dict = dict(preprocessor(images=lowercase_ , return_tensors=lowercase_ ) )
return inputs
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : Optional[Any] = logging.get_logger(__name__)
_lowercase : Union[str, Any] = {
"facebook/s2t-small-librispeech-asr": (
"https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = '''speech_to_text'''
UpperCamelCase__ = ['''past_key_values''']
UpperCamelCase__ = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self : str , lowercase_ : Optional[int]=10000 , lowercase_ : int=12 , lowercase_ : Any=2048 , lowercase_ : Any=4 , lowercase_ : Dict=6 , lowercase_ : Any=2048 , lowercase_ : List[str]=4 , lowercase_ : str=0.0 , lowercase_ : str=0.0 , lowercase_ : Union[str, Any]=True , lowercase_ : List[Any]=True , lowercase_ : int="relu" , lowercase_ : str=256 , lowercase_ : int=0.1 , lowercase_ : int=0.0 , lowercase_ : str=0.0 , lowercase_ : Optional[int]=0.02 , lowercase_ : str=2 , lowercase_ : Union[str, Any]=True , lowercase_ : Any=1 , lowercase_ : Dict=0 , lowercase_ : List[str]=2 , lowercase_ : List[Any]=6000 , lowercase_ : Tuple=1024 , lowercase_ : str=2 , lowercase_ : Any=(5, 5) , lowercase_ : Union[str, Any]=1024 , lowercase_ : Dict=80 , lowercase_ : List[Any]=1 , **lowercase_ : int , ):
lowercase_ : List[Any] = vocab_size
lowercase_ : str = d_model
lowercase_ : List[Any] = encoder_ffn_dim
lowercase_ : str = encoder_layers
lowercase_ : Dict = encoder_attention_heads
lowercase_ : str = decoder_ffn_dim
lowercase_ : int = decoder_layers
lowercase_ : Any = decoder_attention_heads
lowercase_ : Any = dropout
lowercase_ : Dict = attention_dropout
lowercase_ : Optional[int] = activation_dropout
lowercase_ : Any = activation_function
lowercase_ : Union[str, Any] = init_std
lowercase_ : str = encoder_layerdrop
lowercase_ : Optional[int] = decoder_layerdrop
lowercase_ : Dict = use_cache
lowercase_ : Union[str, Any] = encoder_layers
lowercase_ : Tuple = scale_embedding # scale factor will be sqrt(d_model) if True
lowercase_ : Dict = max_source_positions
lowercase_ : Optional[int] = max_target_positions
lowercase_ : Tuple = num_conv_layers
lowercase_ : Tuple = list(lowercase_ )
lowercase_ : Union[str, Any] = conv_channels
lowercase_ : str = input_feat_per_channel
lowercase_ : str = input_channels
if len(self.conv_kernel_sizes ) != self.num_conv_layers:
raise ValueError(
"""Configuration for convolutional module is incorrect. """
"""It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` """
f'''but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, '''
f'''`config.num_conv_layers = {self.num_conv_layers}`.''' )
super().__init__(
pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , is_encoder_decoder=lowercase_ , decoder_start_token_id=lowercase_ , **lowercase_ , )
'''simple docstring'''
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
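# This module backs the `accelerate env` CLI subcommand: it collects the
# Accelerate/PyTorch/platform versions and the current default config, and prints
# them in a form that can be pasted directly into a GitHub issue.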
def env_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("env")
    else:
        parser = argparse.ArgumentParser("Accelerate env command")

    parser.add_argument(
        "--config_file", default=None, help="The config file to use for the default values in the launching script."
    )

    if subparsers is not None:
        parser.set_defaults(func=env_command)
    return parser


def env_command(args):
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()

    accelerate_config = "Not found"
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()

    info = {
        "`Accelerate` version": version,
        "Platform": platform.platform(),
        "Python version": platform.python_version(),
        "Numpy version": np.__version__,
        "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
        "PyTorch XPU available": str(pt_xpu_available),
        "PyTorch NPU available": str(pt_npu_available),
        "System RAM": f"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB",
    }
    if pt_cuda_available:
        info["GPU type"] = torch.cuda.get_device_name()

    print("\nCopy-and-paste the text below in your GitHub issue\n")
    print("\n".join([f"- {prop}: {val}" for prop, val in info.items()]))

    print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:")
    accelerate_config_str = (
        "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config, dict)
        else f"\t{accelerate_config}"
    )
    print(accelerate_config_str)

    info["`Accelerate` configs"] = accelerate_config

    return info


def main() -> int:
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args)
    return 0
if __name__ == "__main__":
raise SystemExit(main())
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class __magic_name__ :
def __init__( self : int , lowercase_ : Optional[Any] , lowercase_ : List[Any]=13 , lowercase_ : List[str]=10 , lowercase_ : Union[str, Any]=3 , lowercase_ : str=2 , lowercase_ : Optional[Any]=2 , lowercase_ : int=True , lowercase_ : List[Any]=True , lowercase_ : Union[str, Any]=32 , lowercase_ : Union[str, Any]=5 , lowercase_ : str=4 , lowercase_ : Dict=37 , lowercase_ : Tuple="gelu" , lowercase_ : int=0.1 , lowercase_ : Optional[Any]=0.1 , lowercase_ : Any=10 , lowercase_ : Tuple=0.02 , lowercase_ : Any="divided_space_time" , lowercase_ : Tuple=None , ):
lowercase_ : int = parent
lowercase_ : str = batch_size
lowercase_ : List[str] = image_size
lowercase_ : str = num_channels
lowercase_ : List[Any] = patch_size
lowercase_ : Optional[Any] = num_frames
lowercase_ : Dict = is_training
lowercase_ : int = use_labels
lowercase_ : List[str] = hidden_size
lowercase_ : Dict = num_hidden_layers
lowercase_ : Dict = num_attention_heads
lowercase_ : Any = intermediate_size
lowercase_ : Optional[int] = hidden_act
lowercase_ : Optional[Any] = hidden_dropout_prob
lowercase_ : List[Any] = attention_probs_dropout_prob
lowercase_ : Any = attention_type
lowercase_ : Union[str, Any] = initializer_range
lowercase_ : List[str] = scope
lowercase_ : Optional[int] = num_labels
# in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
lowercase_ : Dict = (image_size // patch_size) ** 2
lowercase_ : List[Any] = (num_frames) * self.num_patches_per_frame + 1
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
lowercase_ : Optional[Any] = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
lowercase_ : int = None
if self.use_labels:
lowercase_ : Optional[int] = ids_tensor([self.batch_size] , self.num_labels )
lowercase_ : Optional[Any] = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
lowercase_ : int = TimesformerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
lowercase_ : Any = self.num_labels
return config
def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : List[Any] , lowercase_ : Dict , lowercase_ : List[str] ):
lowercase_ : Optional[Any] = TimesformerModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ : int = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Optional[int] , lowercase_ : List[str] , lowercase_ : str ):
lowercase_ : Dict = TimesformerForVideoClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ : int = model(lowercase_ )
# verify the logits shape
lowercase_ : List[Any] = torch.Size((self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
lowercase_ : List[str] = self.prepare_config_and_inputs()
lowercase_ , lowercase_ , lowercase_ : int = config_and_inputs
lowercase_ : List[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __magic_name__ ( _UpperCAmelCase, _UpperCAmelCase, unittest.TestCase):
UpperCamelCase__ = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
UpperCamelCase__ = (
{'''feature-extraction''': TimesformerModel, '''video-classification''': TimesformerForVideoClassification}
if is_torch_available()
else {}
)
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
lowercase_ : Any = TimesformerModelTester(self )
lowercase_ : Union[str, Any] = ConfigTester(
self , config_class=lowercase_ , has_text_modality=lowercase_ , hidden_size=37 )
def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : Any , lowercase_ : List[str] , lowercase_ : Tuple=False ):
lowercase_ : List[Any] = copy.deepcopy(lowercase_ )
if return_labels:
if model_class in get_values(lowercase_ ):
lowercase_ : List[str] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase_ )
return inputs_dict
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""TimeSformer does not use inputs_embeds""" )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
pass
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
lowercase_ , lowercase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : str = model_class(lowercase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase_ : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase_ , nn.Linear ) )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ , lowercase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : Dict = model_class(lowercase_ )
lowercase_ : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ : Union[str, Any] = [*signature.parameters.keys()]
lowercase_ : str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : str ):
lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : str ):
lowercase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*lowercase_ )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Any ):
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ : Any = TimesformerModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : str ):
if not self.has_attentions:
pass
else:
lowercase_ , lowercase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ : List[str] = True
for model_class in self.all_model_classes:
lowercase_ : str = self.model_tester.seq_length
lowercase_ : int = self.model_tester.num_frames
lowercase_ : int = True
lowercase_ : Any = False
lowercase_ : str = True
lowercase_ : int = model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
with torch.no_grad():
lowercase_ : List[Any] = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
lowercase_ : List[str] = outputs.attentions
self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowercase_ : List[str] = True
lowercase_ : str = model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
with torch.no_grad():
lowercase_ : Dict = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
lowercase_ : int = outputs.attentions
self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
lowercase_ : Optional[Any] = len(lowercase_ )
# Check attention is always last and order is fine
lowercase_ : Tuple = True
lowercase_ : Dict = True
lowercase_ : str = model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
with torch.no_grad():
lowercase_ : str = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
self.assertEqual(out_len + 1 , len(lowercase_ ) )
lowercase_ : Optional[Any] = outputs.attentions
self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
def check_hidden_states_output(lowercase_ : Union[str, Any] , lowercase_ : Optional[int] , lowercase_ : Dict ):
lowercase_ : List[str] = model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
with torch.no_grad():
lowercase_ : Optional[Any] = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
lowercase_ : Dict = outputs.hidden_states
lowercase_ : List[Any] = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(lowercase_ ) , lowercase_ )
lowercase_ : List[Any] = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
lowercase_ , lowercase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : List[str] = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase_ : Optional[int] = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
def lowerCamelCase ( ) -> Optional[int]:
lowercase_ : List[str] = hf_hub_download(
repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" )
lowercase_ : List[Any] = np.load(UpperCAmelCase__ )
return list(UpperCAmelCase__ )
@require_torch
@require_vision
class __magic_name__ ( unittest.TestCase):
@cached_property
def SCREAMING_SNAKE_CASE_ ( self : str ):
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
lowercase_ : Any = TimesformerForVideoClassification.from_pretrained("""facebook/timesformer-base-finetuned-k400""" ).to(
lowercase_ )
lowercase_ : Optional[Any] = self.default_image_processor
lowercase_ : Any = prepare_video()
lowercase_ : Optional[int] = image_processor(video[:8] , return_tensors="""pt""" ).to(lowercase_ )
# forward pass
with torch.no_grad():
lowercase_ : Optional[Any] = model(**lowercase_ )
# verify the logits
lowercase_ : Any = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape , lowercase_ )
lowercase_ : int = torch.tensor([-0.30_16, -0.77_13, -0.42_05] ).to(lowercase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase_ , atol=1E-4 ) )
'''simple docstring'''
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class __magic_name__ :
def __init__( self : Tuple , lowercase_ : Tuple , lowercase_ : Any=3 , lowercase_ : int=32 , lowercase_ : str=3 , lowercase_ : int=10 , lowercase_ : Any=[8, 16, 32, 64] , lowercase_ : Tuple=[1, 1, 2, 1] , lowercase_ : Any=True , lowercase_ : int=True , lowercase_ : Any="relu" , lowercase_ : List[Any]=3 , lowercase_ : Tuple=None , lowercase_ : Union[str, Any]=["stage2", "stage3", "stage4"] , lowercase_ : Optional[int]=[2, 3, 4] , lowercase_ : List[str]=1 , ):
lowercase_ : Any = parent
lowercase_ : str = batch_size
lowercase_ : Any = image_size
lowercase_ : Optional[Any] = num_channels
lowercase_ : Any = embeddings_size
lowercase_ : Union[str, Any] = hidden_sizes
lowercase_ : Any = depths
lowercase_ : Dict = is_training
lowercase_ : Tuple = use_labels
lowercase_ : str = hidden_act
lowercase_ : Optional[Any] = num_labels
lowercase_ : Tuple = scope
lowercase_ : Any = len(lowercase_ )
lowercase_ : Optional[Any] = out_features
lowercase_ : Tuple = out_indices
lowercase_ : str = num_groups
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
lowercase_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase_ : List[Any] = None
if self.use_labels:
lowercase_ : List[str] = ids_tensor([self.batch_size] , self.num_labels )
lowercase_ : int = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : int , lowercase_ : List[str] , lowercase_ : List[str] ):
lowercase_ : Optional[int] = BitModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ : List[Any] = model(lowercase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : Tuple , lowercase_ : Optional[int] , lowercase_ : List[Any] ):
lowercase_ : Union[str, Any] = self.num_labels
lowercase_ : Tuple = BitForImageClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ : Any = model(lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : List[Any] , lowercase_ : Optional[int] ):
lowercase_ : Any = BitBackbone(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ : Dict = model(lowercase_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
lowercase_ : List[str] = None
lowercase_ : Dict = BitBackbone(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ : Tuple = model(lowercase_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ : Optional[int] = self.prepare_config_and_inputs()
lowercase_ , lowercase_ , lowercase_ : Optional[Any] = config_and_inputs
lowercase_ : Any = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __magic_name__ ( _UpperCAmelCase, _UpperCAmelCase, unittest.TestCase):
UpperCamelCase__ = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
UpperCamelCase__ = (
{'''feature-extraction''': BitModel, '''image-classification''': BitForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
def SCREAMING_SNAKE_CASE_ ( self : str ):
lowercase_ : int = BitModelTester(self )
lowercase_ : Optional[int] = ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
return
@unittest.skip(reason="""Bit does not output attentions""" )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
pass
@unittest.skip(reason="""Bit does not use inputs_embeds""" )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
pass
@unittest.skip(reason="""Bit does not support input and output embeddings""" )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
pass
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
lowercase_ , lowercase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : Optional[Any] = model_class(lowercase_ )
lowercase_ : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ : Union[str, Any] = [*signature.parameters.keys()]
lowercase_ : Optional[int] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
lowercase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ , lowercase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : List[Any] = model_class(config=lowercase_ )
for name, module in model.named_modules():
if isinstance(lowercase_ , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
def check_hidden_states_output(lowercase_ : Optional[int] , lowercase_ : int , lowercase_ : int ):
lowercase_ : Optional[Any] = model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
with torch.no_grad():
lowercase_ : List[Any] = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
lowercase_ : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowercase_ : Optional[int] = self.model_tester.num_stages
self.assertEqual(len(lowercase_ ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowercase_ , lowercase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ : Dict = ["""preactivation""", """bottleneck"""]
for model_class in self.all_model_classes:
for layer_type in layers_type:
lowercase_ : Union[str, Any] = layer_type
lowercase_ : Optional[Any] = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase_ : Union[str, Any] = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
@unittest.skip(reason="""Bit does not use feedforward chunking""" )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
pass
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
lowercase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase_ )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ : List[str] = BitModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
def lowerCamelCase ( ) -> Optional[Any]:
lowercase_ : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class __magic_name__ ( unittest.TestCase):
@cached_property
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def SCREAMING_SNAKE_CASE_ ( self : int ):
lowercase_ : List[str] = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(lowercase_ )
lowercase_ : int = self.default_image_processor
lowercase_ : List[Any] = prepare_img()
lowercase_ : Dict = image_processor(images=lowercase_ , return_tensors="""pt""" ).to(lowercase_ )
# forward pass
with torch.no_grad():
lowercase_ : str = model(**lowercase_ )
# verify the logits
lowercase_ : Optional[int] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowercase_ )
lowercase_ : Union[str, Any] = torch.tensor([[-0.65_26, -0.52_63, -1.43_98]] ).to(lowercase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase_ , atol=1E-4 ) )
@require_torch
class __magic_name__ ( _UpperCAmelCase, unittest.TestCase):
UpperCamelCase__ = (BitBackbone,) if is_torch_available() else ()
UpperCamelCase__ = BitConfig
UpperCamelCase__ = False
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
lowercase_ : Union[str, Any] = BitModelTester(self )
| 30
|
'''simple docstring'''
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
_lowercase : Tuple = logging.get_logger(__name__)
# General docstring
_lowercase : List[str] = "RegNetConfig"
# Base docstring
_lowercase : Dict = "facebook/regnet-y-040"
_lowercase : Union[str, Any] = [1, 1088, 7, 7]
# Image classification docstring
_lowercase : Optional[Any] = "facebook/regnet-y-040"
_lowercase : Union[str, Any] = "tabby, tabby cat"
_lowercase : str = [
"facebook/regnet-y-040",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class __magic_name__ ( nn.Module):
def __init__( self : Union[str, Any] , lowercase_ : int , lowercase_ : int , lowercase_ : int = 3 , lowercase_ : int = 1 , lowercase_ : int = 1 , lowercase_ : Optional[str] = "relu" , ):
super().__init__()
lowercase_ : List[Any] = nn.Convad(
lowercase_ , lowercase_ , kernel_size=lowercase_ , stride=lowercase_ , padding=kernel_size // 2 , groups=lowercase_ , bias=lowercase_ , )
lowercase_ : str = nn.BatchNormad(lowercase_ )
lowercase_ : Optional[int] = ACTaFN[activation] if activation is not None else nn.Identity()
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : List[str] ):
lowercase_ : Dict = self.convolution(lowercase_ )
lowercase_ : str = self.normalization(lowercase_ )
lowercase_ : Optional[Any] = self.activation(lowercase_ )
return hidden_state
class __magic_name__ ( nn.Module):
def __init__( self : List[Any] , lowercase_ : RegNetConfig ):
super().__init__()
lowercase_ : str = RegNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act )
lowercase_ : Any = config.num_channels
def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : Optional[Any] ):
lowercase_ : List[str] = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
"""Make sure that the channel dimension of the pixel values match with the one set in the configuration.""" )
lowercase_ : Any = self.embedder(lowercase_ )
return hidden_state
class __magic_name__ ( nn.Module):
def __init__( self : Optional[int] , lowercase_ : int , lowercase_ : int , lowercase_ : int = 2 ):
super().__init__()
lowercase_ : Optional[Any] = nn.Convad(lowercase_ , lowercase_ , kernel_size=1 , stride=lowercase_ , bias=lowercase_ )
lowercase_ : Union[str, Any] = nn.BatchNormad(lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : Tensor ):
lowercase_ : Tuple = self.convolution(lowercase_ )
lowercase_ : str = self.normalization(lowercase_ )
return hidden_state
class __magic_name__ ( nn.Module):
def __init__( self : str , lowercase_ : int , lowercase_ : int ):
super().__init__()
lowercase_ : int = nn.AdaptiveAvgPoolad((1, 1) )
lowercase_ : int = nn.Sequential(
nn.Convad(lowercase_ , lowercase_ , kernel_size=1 ) , nn.ReLU() , nn.Convad(lowercase_ , lowercase_ , kernel_size=1 ) , nn.Sigmoid() , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : Any ):
# b c h w -> b c 1 1
lowercase_ : List[str] = self.pooler(lowercase_ )
lowercase_ : Optional[int] = self.attention(lowercase_ )
lowercase_ : Any = hidden_state * attention
return hidden_state
class __magic_name__ ( nn.Module):
def __init__( self : Optional[int] , lowercase_ : RegNetConfig , lowercase_ : int , lowercase_ : int , lowercase_ : int = 1 ):
super().__init__()
lowercase_ : List[Any] = in_channels != out_channels or stride != 1
lowercase_ : Optional[int] = max(1 , out_channels // config.groups_width )
lowercase_ : Dict = (
RegNetShortCut(lowercase_ , lowercase_ , stride=lowercase_ ) if should_apply_shortcut else nn.Identity()
)
lowercase_ : List[Any] = nn.Sequential(
RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(lowercase_ , lowercase_ , stride=lowercase_ , groups=lowercase_ , activation=config.hidden_act ) , RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=lowercase_ ) , )
lowercase_ : int = ACTaFN[config.hidden_act]
def SCREAMING_SNAKE_CASE_ ( self : Any , lowercase_ : Any ):
lowercase_ : Any = hidden_state
lowercase_ : Union[str, Any] = self.layer(lowercase_ )
lowercase_ : Union[str, Any] = self.shortcut(lowercase_ )
hidden_state += residual
lowercase_ : str = self.activation(lowercase_ )
return hidden_state
class __magic_name__ ( nn.Module):
def __init__( self : Optional[Any] , lowercase_ : RegNetConfig , lowercase_ : int , lowercase_ : int , lowercase_ : int = 1 ):
super().__init__()
lowercase_ : str = in_channels != out_channels or stride != 1
lowercase_ : int = max(1 , out_channels // config.groups_width )
lowercase_ : int = (
RegNetShortCut(lowercase_ , lowercase_ , stride=lowercase_ ) if should_apply_shortcut else nn.Identity()
)
lowercase_ : Union[str, Any] = nn.Sequential(
RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(lowercase_ , lowercase_ , stride=lowercase_ , groups=lowercase_ , activation=config.hidden_act ) , RegNetSELayer(lowercase_ , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=lowercase_ ) , )
lowercase_ : Optional[int] = ACTaFN[config.hidden_act]
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Union[str, Any] ):
lowercase_ : Optional[int] = hidden_state
lowercase_ : str = self.layer(lowercase_ )
lowercase_ : int = self.shortcut(lowercase_ )
hidden_state += residual
lowercase_ : Optional[Any] = self.activation(lowercase_ )
return hidden_state
class __magic_name__ ( nn.Module):
def __init__( self : str , lowercase_ : RegNetConfig , lowercase_ : int , lowercase_ : int , lowercase_ : int = 2 , lowercase_ : int = 2 , ):
super().__init__()
lowercase_ : str = RegNetXLayer if config.layer_type == """x""" else RegNetYLayer
lowercase_ : str = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(
lowercase_ , lowercase_ , lowercase_ , stride=lowercase_ , ) , *[layer(lowercase_ , lowercase_ , lowercase_ ) for _ in range(depth - 1 )] , )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : List[str] ):
lowercase_ : Tuple = self.layers(lowercase_ )
return hidden_state
class __magic_name__ ( nn.Module):
def __init__( self : Dict , lowercase_ : RegNetConfig ):
super().__init__()
lowercase_ : Optional[Any] = nn.ModuleList([] )
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
RegNetStage(
lowercase_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
lowercase_ : Optional[Any] = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(lowercase_ , config.depths[1:] ):
self.stages.append(RegNetStage(lowercase_ , lowercase_ , lowercase_ , depth=lowercase_ ) )
def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : Tensor , lowercase_ : bool = False , lowercase_ : bool = True ):
lowercase_ : Tuple = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
lowercase_ : Union[str, Any] = hidden_states + (hidden_state,)
lowercase_ : Dict = stage_module(lowercase_ )
if output_hidden_states:
lowercase_ : Optional[Any] = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=lowercase_ , hidden_states=lowercase_ )
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = RegNetConfig
UpperCamelCase__ = '''regnet'''
UpperCamelCase__ = '''pixel_values'''
UpperCamelCase__ = True
def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : Optional[Any] ):
if isinstance(lowercase_ , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode="""fan_out""" , nonlinearity="""relu""" )
elif isinstance(lowercase_ , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : Optional[int] , lowercase_ : Any=False ):
if isinstance(lowercase_ , lowercase_ ):
lowercase_ : List[str] = value
_lowercase : Dict = r"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
_lowercase : Any = r"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
'''The bare RegNet model outputting raw features without any specific head on top.''', _UpperCAmelCase, )
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class __magic_name__ ( _UpperCAmelCase):
def __init__( self : Any , lowercase_ : Any ):
super().__init__(lowercase_ )
lowercase_ : List[str] = config
lowercase_ : Union[str, Any] = RegNetEmbeddings(lowercase_ )
lowercase_ : Union[str, Any] = RegNetEncoder(lowercase_ )
lowercase_ : str = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowercase_ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowercase_ , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : Tensor , lowercase_ : Optional[bool] = None , lowercase_ : Optional[bool] = None ):
lowercase_ : List[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase_ : Tuple = return_dict if return_dict is not None else self.config.use_return_dict
lowercase_ : str = self.embedder(lowercase_ )
lowercase_ : Optional[Any] = self.encoder(
lowercase_ , output_hidden_states=lowercase_ , return_dict=lowercase_ )
lowercase_ : List[Any] = encoder_outputs[0]
lowercase_ : str = self.pooler(lowercase_ )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowercase_ , pooler_output=lowercase_ , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
'''
RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
''', _UpperCAmelCase, )
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class __magic_name__ ( _UpperCAmelCase):
def __init__( self : Dict , lowercase_ : str ):
super().__init__(lowercase_ )
lowercase_ : Any = config.num_labels
lowercase_ : List[str] = RegNetModel(lowercase_ )
# classification head
lowercase_ : Any = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowercase_ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowercase_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Optional[torch.FloatTensor] = None , lowercase_ : Optional[torch.LongTensor] = None , lowercase_ : Optional[bool] = None , lowercase_ : Optional[bool] = None , ):
lowercase_ : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict
lowercase_ : Optional[int] = self.regnet(lowercase_ , output_hidden_states=lowercase_ , return_dict=lowercase_ )
lowercase_ : Optional[int] = outputs.pooler_output if return_dict else outputs[1]
lowercase_ : List[Any] = self.classifier(lowercase_ )
lowercase_ : Optional[int] = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
lowercase_ : Optional[int] = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
lowercase_ : str = """single_label_classification"""
else:
lowercase_ : str = """multi_label_classification"""
if self.config.problem_type == "regression":
lowercase_ : str = MSELoss()
if self.num_labels == 1:
lowercase_ : List[Any] = loss_fct(logits.squeeze() , labels.squeeze() )
else:
lowercase_ : List[str] = loss_fct(lowercase_ , lowercase_ )
elif self.config.problem_type == "single_label_classification":
lowercase_ : Optional[int] = CrossEntropyLoss()
lowercase_ : Union[str, Any] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
lowercase_ : Dict = BCEWithLogitsLoss()
lowercase_ : Tuple = loss_fct(lowercase_ , lowercase_ )
if not return_dict:
lowercase_ : Tuple = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=lowercase_ , logits=lowercase_ , hidden_states=outputs.hidden_states )
| 30
| 1
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowercase : List[Any] = {
"configuration_xmod": [
"XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XmodConfig",
"XmodOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : str = [
"XMOD_PRETRAINED_MODEL_ARCHIVE_LIST",
"XmodForCausalLM",
"XmodForMaskedLM",
"XmodForMultipleChoice",
"XmodForQuestionAnswering",
"XmodForSequenceClassification",
"XmodForTokenClassification",
"XmodModel",
"XmodPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
_lowercase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 30
|
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowercase : List[Any] = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : int = [
"FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FocalNetForImageClassification",
"FocalNetForMaskedImageModeling",
"FocalNetBackbone",
"FocalNetModel",
"FocalNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
_lowercase : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 30
| 1
|
'''simple docstring'''
_lowercase : List[Any] = "Input must be a string of 8 numbers plus letter"
_lowercase : Any = "TRWAGMYFPDXBNJZSQVHLCKE"
def lowerCamelCase ( UpperCAmelCase__ : str ) -> bool:
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
lowercase_ : str = F'''Expected string as input, found {type(UpperCAmelCase__ ).__name__}'''
raise TypeError(UpperCAmelCase__ )
lowercase_ : Tuple = spanish_id.replace("""-""" , """""" ).upper()
if len(UpperCAmelCase__ ) != 9:
raise ValueError(UpperCAmelCase__ )
try:
lowercase_ : Any = int(spanish_id_clean[0:8] )
lowercase_ : List[str] = spanish_id_clean[8]
except ValueError as ex:
raise ValueError(UpperCAmelCase__ ) from ex
if letter.isdigit():
raise ValueError(UpperCAmelCase__ )
return letter == LOOKUP_LETTERS[number % 23]
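# Example with an illustrative, made-up ID: lowerCamelCase("12345678Z") returns True,
# since 12345678 % 23 == 14 and LOOKUP_LETTERS[14] == "Z".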
if __name__ == "__main__":
import doctest
doctest.testmod()
| 30
|
'''simple docstring'''
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize("""dataset_size""" , [None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize("""input_in_memory_max_size""" , ["""default""", 0, 100 * 2**20, 900 * 2**20] )
def lowerCamelCase(dataset_size, input_in_memory_max_size, monkeypatch):
    # Patch the configured in-memory size limit unless the default should be used.
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config, "IN_MEMORY_MAX_SIZE", input_in_memory_max_size)
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    # A dataset is "small" only when both sizes are set and it fits under the limit.
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size)
    assert result == expected
| 30
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowercase : Tuple = logging.get_logger(__name__)
_lowercase : Any = {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"
),
"distilbert-base-uncased-finetuned-sst-2-english": (
"https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"
),
}
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = '''distilbert'''
UpperCamelCase__ = {
'''hidden_size''': '''dim''',
'''num_attention_heads''': '''n_heads''',
'''num_hidden_layers''': '''n_layers''',
}
def __init__( self : List[Any] , lowercase_ : str=30522 , lowercase_ : List[Any]=512 , lowercase_ : Any=False , lowercase_ : List[str]=6 , lowercase_ : str=12 , lowercase_ : Optional[Any]=768 , lowercase_ : Any=4 * 768 , lowercase_ : Optional[int]=0.1 , lowercase_ : int=0.1 , lowercase_ : Any="gelu" , lowercase_ : Tuple=0.02 , lowercase_ : List[str]=0.1 , lowercase_ : Dict=0.2 , lowercase_ : str=0 , **lowercase_ : List[str] , ):
lowercase_ : Dict = vocab_size
lowercase_ : str = max_position_embeddings
lowercase_ : Optional[Any] = sinusoidal_pos_embds
lowercase_ : Optional[Any] = n_layers
lowercase_ : List[Any] = n_heads
lowercase_ : int = dim
lowercase_ : str = hidden_dim
lowercase_ : Optional[Any] = dropout
lowercase_ : Any = attention_dropout
lowercase_ : Dict = activation
lowercase_ : Tuple = initializer_range
lowercase_ : str = qa_dropout
lowercase_ : List[str] = seq_classif_dropout
super().__init__(**lowercase_ , pad_token_id=lowercase_ )
class __magic_name__ ( _UpperCAmelCase):
@property
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
if self.task == "multiple-choice":
lowercase_ : Any = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
lowercase_ : Union[str, Any] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 30
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class __magic_name__ ( unittest.TestCase):
@slow
def SCREAMING_SNAKE_CASE_ ( self : str ):
lowercase_ : Any = TFAutoModelForSeqaSeqLM.from_pretrained("""google/mt5-small""" )
lowercase_ : Dict = AutoTokenizer.from_pretrained("""google/mt5-small""" )
lowercase_ : Union[str, Any] = tokenizer("""Hello there""" , return_tensors="""tf""" ).input_ids
lowercase_ : List[str] = tokenizer("""Hi I am""" , return_tensors="""tf""" ).input_ids
lowercase_ : Optional[Any] = model(lowercase_ , labels=lowercase_ ).loss
lowercase_ : Optional[int] = -tf.math.reduce_mean(lowercase_ ).numpy()
lowercase_ : Optional[int] = -21.22_81_68
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2E-4 )
| 30
| 1
|
'''simple docstring'''
def binomial_coefficient(n: int, r: int) -> int:
    # Compute C(n, r) with Pascal's rule, keeping only a single row of size r + 1.
    c = [0 for i in range(r + 1)]
    # nC0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
print(binomial_coefficient(n=10, r=5))
| 30
|
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def lowerCamelCase(ode_func: Callable, ya: float, xa: float, step_size: float, x_end: float) -> np.array:
    # Heun's (modified Euler) method: a forward-Euler predictor followed by a trapezoidal corrector.
    n = int(np.ceil((x_end - xa) / step_size))
    y = np.zeros((n + 1,))
    y[0] = ya
    x = xa
    for k in range(n):
        # Predictor: plain Euler step.
        y_predict = y[k] + step_size * ode_func(x, y[k])
        # Corrector: average the slopes at the current and predicted points.
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        )
        x += step_size
    return y
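# Illustrative call (not part of the original file): lowerCamelCase(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
# integrates y' = y from x = 0 to x = 1; the last entry of the result approximates e ≈ 2.718.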
if __name__ == "__main__":
import doctest
doctest.testmod()
| 30
| 1
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class __magic_name__ ( unittest.TestCase):
def __init__( self : Union[str, Any] , lowercase_ : Tuple , lowercase_ : List[str]=7 , lowercase_ : Tuple=3 , lowercase_ : List[str]=30 , lowercase_ : Optional[Any]=400 , lowercase_ : List[Any]=True , lowercase_ : int=None , lowercase_ : Union[str, Any]=0.9 , lowercase_ : Union[str, Any]=None , lowercase_ : Union[str, Any]=True , lowercase_ : Optional[int]=[0.5, 0.5, 0.5] , lowercase_ : Union[str, Any]=[0.5, 0.5, 0.5] , ):
lowercase_ : str = size if size is not None else {"""shortest_edge""": 30}
lowercase_ : List[Any] = crop_size if crop_size is not None else {"""height""": 30, """width""": 30}
lowercase_ : Tuple = parent
lowercase_ : str = batch_size
lowercase_ : Any = num_channels
lowercase_ : List[str] = min_resolution
lowercase_ : int = max_resolution
lowercase_ : str = do_resize_and_center_crop
lowercase_ : List[str] = size
lowercase_ : List[Any] = crop_pct
lowercase_ : Any = crop_size
lowercase_ : Dict = do_normalize
lowercase_ : Union[str, Any] = image_mean
lowercase_ : Optional[Any] = image_std
def SCREAMING_SNAKE_CASE_ ( self : str ):
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class __magic_name__ ( _UpperCAmelCase, unittest.TestCase):
UpperCamelCase__ = PoolFormerImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
lowercase_ : Union[str, Any] = PoolFormerImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE_ ( self : int ):
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
lowercase_ : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase_ , """do_resize_and_center_crop""" ) )
self.assertTrue(hasattr(lowercase_ , """size""" ) )
self.assertTrue(hasattr(lowercase_ , """crop_pct""" ) )
self.assertTrue(hasattr(lowercase_ , """do_normalize""" ) )
self.assertTrue(hasattr(lowercase_ , """image_mean""" ) )
self.assertTrue(hasattr(lowercase_ , """image_std""" ) )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
lowercase_ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 30} )
self.assertEqual(image_processor.crop_size , {"""height""": 30, """width""": 30} )
lowercase_ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
pass
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
# Initialize image_processing
lowercase_ : int = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , Image.Image )
# Test not batched input
lowercase_ : Any = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
lowercase_ : Union[str, Any] = image_processing(lowercase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
# Initialize image_processing
lowercase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase_ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ , numpify=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , np.ndarray )
# Test not batched input
lowercase_ : str = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
lowercase_ : Union[str, Any] = image_processing(lowercase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
# Initialize image_processing
lowercase_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ , torchify=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , torch.Tensor )
# Test not batched input
lowercase_ : Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
lowercase_ : Dict = image_processing(lowercase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
| 30
|
'''simple docstring'''
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
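# The functions below construct standard second-order (biquad) IIR sections: low-pass, high-pass,
# band-pass, all-pass, peak, and shelving designs, parameterized by frequency, sample rate, Q,
# and (for the gain-based designs) a gain in dB.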
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float = 1 / sqrt(2 ) ) -> IIRFilter:
lowercase_ : str = tau * frequency / samplerate
lowercase_ : Tuple = sin(UpperCAmelCase__ )
lowercase_ : int = cos(UpperCAmelCase__ )
lowercase_ : Any = _sin / (2 * q_factor)
lowercase_ : Dict = (1 - _cos) / 2
lowercase_ : Optional[int] = 1 - _cos
lowercase_ : Dict = 1 + alpha
lowercase_ : List[Any] = -2 * _cos
lowercase_ : Union[str, Any] = 1 - alpha
lowercase_ : List[Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float = 1 / sqrt(2 ) ) -> IIRFilter:
lowercase_ : str = tau * frequency / samplerate
lowercase_ : Optional[int] = sin(UpperCAmelCase__ )
lowercase_ : Dict = cos(UpperCAmelCase__ )
lowercase_ : Optional[int] = _sin / (2 * q_factor)
lowercase_ : Dict = (1 + _cos) / 2
lowercase_ : str = -1 - _cos
lowercase_ : Dict = 1 + alpha
lowercase_ : Optional[Any] = -2 * _cos
lowercase_ : List[Any] = 1 - alpha
lowercase_ : Union[str, Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float = 1 / sqrt(2 ) ) -> IIRFilter:
lowercase_ : int = tau * frequency / samplerate
lowercase_ : int = sin(UpperCAmelCase__ )
lowercase_ : Union[str, Any] = cos(UpperCAmelCase__ )
lowercase_ : str = _sin / (2 * q_factor)
lowercase_ : str = _sin / 2
lowercase_ : Any = 0
lowercase_ : Optional[Any] = -ba
lowercase_ : Dict = 1 + alpha
lowercase_ : Union[str, Any] = -2 * _cos
lowercase_ : Union[str, Any] = 1 - alpha
lowercase_ : Tuple = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float = 1 / sqrt(2 ) ) -> IIRFilter:
lowercase_ : List[str] = tau * frequency / samplerate
lowercase_ : Any = sin(UpperCAmelCase__ )
lowercase_ : List[Any] = cos(UpperCAmelCase__ )
lowercase_ : Optional[Any] = _sin / (2 * q_factor)
lowercase_ : Any = 1 - alpha
lowercase_ : Optional[Any] = -2 * _cos
lowercase_ : Optional[int] = 1 + alpha
lowercase_ : Dict = IIRFilter(2 )
filt.set_coefficients([ba, ba, ba] , [ba, ba, ba] )
return filt
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float , UpperCAmelCase__ : float = 1 / sqrt(2 ) , ) -> IIRFilter:
lowercase_ : Dict = tau * frequency / samplerate
lowercase_ : Tuple = sin(UpperCAmelCase__ )
lowercase_ : List[Any] = cos(UpperCAmelCase__ )
lowercase_ : List[Any] = _sin / (2 * q_factor)
lowercase_ : Any = 10 ** (gain_db / 40)
lowercase_ : List[str] = 1 + alpha * big_a
lowercase_ : List[Any] = -2 * _cos
lowercase_ : Dict = 1 - alpha * big_a
lowercase_ : str = 1 + alpha / big_a
lowercase_ : List[str] = -2 * _cos
lowercase_ : Tuple = 1 - alpha / big_a
lowercase_ : Any = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float , UpperCAmelCase__ : float = 1 / sqrt(2 ) , ) -> IIRFilter:
lowercase_ : Dict = tau * frequency / samplerate
lowercase_ : Union[str, Any] = sin(UpperCAmelCase__ )
lowercase_ : Any = cos(UpperCAmelCase__ )
lowercase_ : Any = _sin / (2 * q_factor)
lowercase_ : Any = 10 ** (gain_db / 40)
lowercase_ : Any = (big_a + 1) - (big_a - 1) * _cos
lowercase_ : int = (big_a + 1) + (big_a - 1) * _cos
lowercase_ : Tuple = (big_a - 1) - (big_a + 1) * _cos
lowercase_ : Optional[Any] = (big_a - 1) + (big_a + 1) * _cos
lowercase_ : int = 2 * sqrt(UpperCAmelCase__ ) * alpha
lowercase_ : Tuple = big_a * (pmc + aaa)
lowercase_ : List[str] = 2 * big_a * mpc
lowercase_ : Union[str, Any] = big_a * (pmc - aaa)
lowercase_ : Optional[int] = ppmc + aaa
lowercase_ : Optional[int] = -2 * pmpc
lowercase_ : Any = ppmc - aaa
lowercase_ : Optional[int] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float , UpperCAmelCase__ : float = 1 / sqrt(2 ) , ) -> IIRFilter:
lowercase_ : str = tau * frequency / samplerate
lowercase_ : int = sin(UpperCAmelCase__ )
lowercase_ : int = cos(UpperCAmelCase__ )
lowercase_ : Dict = _sin / (2 * q_factor)
lowercase_ : Union[str, Any] = 10 ** (gain_db / 40)
lowercase_ : Union[str, Any] = (big_a + 1) - (big_a - 1) * _cos
lowercase_ : Optional[int] = (big_a + 1) + (big_a - 1) * _cos
lowercase_ : Any = (big_a - 1) - (big_a + 1) * _cos
lowercase_ : str = (big_a - 1) + (big_a + 1) * _cos
lowercase_ : Optional[int] = 2 * sqrt(UpperCAmelCase__ ) * alpha
lowercase_ : Tuple = big_a * (ppmc + aaa)
lowercase_ : List[Any] = -2 * big_a * pmpc
lowercase_ : Optional[Any] = big_a * (ppmc - aaa)
lowercase_ : Optional[Any] = pmc + aaa
lowercase_ : int = 2 * mpc
lowercase_ : Tuple = pmc - aaa
lowercase_ : Union[str, Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
| 30
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : Optional[Any] = logging.get_logger(__name__)
_lowercase : Union[str, Any] = {
"facebook/s2t-small-librispeech-asr": (
"https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = '''speech_to_text'''
UpperCamelCase__ = ['''past_key_values''']
UpperCamelCase__ = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self : str , lowercase_ : Optional[int]=10000 , lowercase_ : int=12 , lowercase_ : Any=2048 , lowercase_ : Any=4 , lowercase_ : Dict=6 , lowercase_ : Any=2048 , lowercase_ : List[str]=4 , lowercase_ : str=0.0 , lowercase_ : str=0.0 , lowercase_ : Union[str, Any]=True , lowercase_ : List[Any]=True , lowercase_ : int="relu" , lowercase_ : str=256 , lowercase_ : int=0.1 , lowercase_ : int=0.0 , lowercase_ : str=0.0 , lowercase_ : Optional[int]=0.02 , lowercase_ : str=2 , lowercase_ : Union[str, Any]=True , lowercase_ : Any=1 , lowercase_ : Dict=0 , lowercase_ : List[str]=2 , lowercase_ : List[Any]=6000 , lowercase_ : Tuple=1024 , lowercase_ : str=2 , lowercase_ : Any=(5, 5) , lowercase_ : Union[str, Any]=1024 , lowercase_ : Dict=80 , lowercase_ : List[Any]=1 , **lowercase_ : int , ):
lowercase_ : List[Any] = vocab_size
lowercase_ : str = d_model
lowercase_ : List[Any] = encoder_ffn_dim
lowercase_ : str = encoder_layers
lowercase_ : Dict = encoder_attention_heads
lowercase_ : str = decoder_ffn_dim
lowercase_ : int = decoder_layers
lowercase_ : Any = decoder_attention_heads
lowercase_ : Any = dropout
lowercase_ : Dict = attention_dropout
lowercase_ : Optional[int] = activation_dropout
lowercase_ : Any = activation_function
lowercase_ : Union[str, Any] = init_std
lowercase_ : str = encoder_layerdrop
lowercase_ : Optional[int] = decoder_layerdrop
lowercase_ : Dict = use_cache
lowercase_ : Union[str, Any] = encoder_layers
lowercase_ : Tuple = scale_embedding # scale factor will be sqrt(d_model) if True
lowercase_ : Dict = max_source_positions
lowercase_ : Optional[int] = max_target_positions
lowercase_ : Tuple = num_conv_layers
lowercase_ : Tuple = list(lowercase_ )
lowercase_ : Union[str, Any] = conv_channels
lowercase_ : str = input_feat_per_channel
lowercase_ : str = input_channels
if len(self.conv_kernel_sizes ) != self.num_conv_layers:
raise ValueError(
"""Configuration for convolutional module is incorrect. """
"""It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` """
f'''but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, '''
f'''`config.num_conv_layers = {self.num_conv_layers}`.''' )
super().__init__(
pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , is_encoder_decoder=lowercase_ , decoder_start_token_id=lowercase_ , **lowercase_ , )
| 30
|
'''simple docstring'''
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
_lowercase : str = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class __magic_name__ ( datasets.BuilderConfig):
UpperCamelCase__ = None
def lowerCamelCase ( UpperCAmelCase__ : "pyspark.sql.DataFrame" , UpperCAmelCase__ : List[int] , ) -> str:
import pyspark
def generate_fn():
lowercase_ : List[str] = df.select("""*""" , pyspark.sql.functions.spark_partition_id().alias("""part_id""" ) )
for partition_id in partition_order:
lowercase_ : int = df_with_partition_id.select("""*""" ).where(F'''part_id = {partition_id}''' ).drop("""part_id""" )
lowercase_ : Any = partition_df.collect()
lowercase_ : Dict = 0
for row in rows:
yield F'''{partition_id}_{row_id}''', row.asDict()
row_id += 1
return generate_fn
class __magic_name__ ( _BaseExamplesIterable):
def __init__( self : int , lowercase_ : "pyspark.sql.DataFrame" , lowercase_ : Optional[int]=None , ):
lowercase_ : Dict = df
lowercase_ : Optional[Any] = partition_order or range(self.df.rdd.getNumPartitions() )
lowercase_ : Optional[Any] = _generate_iterable_examples(self.df , self.partition_order )
def __iter__( self : List[Any] ):
yield from self.generate_examples_fn()
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : np.random.Generator ):
lowercase_ : str = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(lowercase_ )
return SparkExamplesIterable(self.df , partition_order=lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : int , lowercase_ : int ):
lowercase_ : str = self.split_shard_indices_by_worker(lowercase_ , lowercase_ )
return SparkExamplesIterable(self.df , partition_order=lowercase_ )
@property
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
return len(self.partition_order )
class __magic_name__ ( datasets.DatasetBuilder):
UpperCamelCase__ = SparkConfig
def __init__( self : Tuple , lowercase_ : "pyspark.sql.DataFrame" , lowercase_ : str = None , lowercase_ : str = None , **lowercase_ : str , ):
import pyspark
lowercase_ : str = pyspark.sql.SparkSession.builder.getOrCreate()
lowercase_ : Optional[int] = df
lowercase_ : List[str] = working_dir
super().__init__(
cache_dir=lowercase_ , config_name=str(self.df.semanticHash() ) , **lowercase_ , )
def SCREAMING_SNAKE_CASE_ ( self : str ):
# Returns the path of the created file.
def create_cache_and_write_probe(lowercase_ : str ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir , exist_ok=lowercase_ )
lowercase_ : List[str] = os.path.join(self._cache_dir , """fs_test""" + uuid.uuida().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(lowercase_ , """a""" )
return [probe_file]
if self._spark.conf.get("""spark.master""" , """""" ).startswith("""local""" ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
lowercase_ : str = (
self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(lowercase_ ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
"""When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir""" )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
return datasets.DatasetInfo(features=self.config.features )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : datasets.download.download_manager.DownloadManager ):
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Union[str, Any] ):
import pyspark
def get_arrow_batch_size(lowercase_ : Any ):
for batch in it:
yield pa.RecordBatch.from_pydict({"""batch_bytes""": [batch.nbytes]} )
lowercase_ : Union[str, Any] = self.df.count()
lowercase_ : Union[str, Any] = df_num_rows if df_num_rows <= 100 else 100
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
lowercase_ : Any = (
self.df.limit(lowercase_ )
.repartition(1 )
.mapInArrow(lowercase_ , """batch_bytes: long""" )
.agg(pyspark.sql.functions.sum("""batch_bytes""" ).alias("""sample_bytes""" ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
lowercase_ : List[Any] = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
lowercase_ : Any = min(lowercase_ , int(approx_total_size / max_shard_size ) )
lowercase_ : Any = self.df.repartition(lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Any , lowercase_ : str , lowercase_ : str , lowercase_ : int , ):
import pyspark
lowercase_ : Any = ParquetWriter if file_format == """parquet""" else ArrowWriter
lowercase_ : Dict = os.path.join(self._working_dir , os.path.basename(lowercase_ ) ) if self._working_dir else fpath
lowercase_ : Optional[Any] = file_format == """parquet"""
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
lowercase_ : Tuple = self.config.features
lowercase_ : Any = self._writer_batch_size
lowercase_ : List[str] = self._fs.storage_options
def write_arrow(lowercase_ : str ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
lowercase_ : List[str] = pyspark.TaskContext().taskAttemptId()
lowercase_ : Dict = next(lowercase_ , lowercase_ )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )
lowercase_ : int = 0
lowercase_ : List[Any] = writer_class(
features=lowercase_ , path=working_fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , writer_batch_size=lowercase_ , storage_options=lowercase_ , embed_local_files=lowercase_ , )
lowercase_ : Optional[Any] = pa.Table.from_batches([first_batch] )
writer.write_table(lowercase_ )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
lowercase_ , lowercase_ : Dict = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )
shard_id += 1
lowercase_ : Any = writer_class(
features=writer._features , path=working_fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , writer_batch_size=lowercase_ , storage_options=lowercase_ , embed_local_files=lowercase_ , )
lowercase_ : List[str] = pa.Table.from_batches([batch] )
writer.write_table(lowercase_ )
if writer._num_bytes > 0:
lowercase_ , lowercase_ : str = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(lowercase_ ) ):
lowercase_ : Optional[Any] = os.path.join(os.path.dirname(lowercase_ ) , os.path.basename(lowercase_ ) )
shutil.move(lowercase_ , lowercase_ )
lowercase_ : Union[str, Any] = (
self.df.mapInArrow(lowercase_ , """task_id: long, num_examples: long, num_bytes: long""" )
.groupBy("""task_id""" )
.agg(
pyspark.sql.functions.sum("""num_examples""" ).alias("""total_num_examples""" ) , pyspark.sql.functions.sum("""num_bytes""" ).alias("""total_num_bytes""" ) , pyspark.sql.functions.count("""num_bytes""" ).alias("""num_shards""" ) , pyspark.sql.functions.collect_list("""num_examples""" ).alias("""shard_lengths""" ) , )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : "datasets.SplitGenerator" , lowercase_ : str = "arrow" , lowercase_ : Optional[Union[str, int]] = None , lowercase_ : Optional[int] = None , **lowercase_ : List[str] , ):
self._validate_cache_dir()
lowercase_ : int = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(lowercase_ )
lowercase_ : Tuple = not is_remote_filesystem(self._fs )
lowercase_ : int = os.path.join if is_local else posixpath.join
lowercase_ : Dict = """-TTTTT-SSSSS-of-NNNNN"""
lowercase_ : Dict = f'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}'''
lowercase_ : Optional[int] = path_join(self._output_dir , lowercase_ )
lowercase_ : Any = 0
lowercase_ : Tuple = 0
lowercase_ : int = 0
lowercase_ : Dict = []
lowercase_ : Union[str, Any] = []
for task_id, content in self._prepare_split_single(lowercase_ , lowercase_ , lowercase_ ):
(
(
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) ,
) : Union[str, Any] = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(lowercase_ )
lowercase_ : List[str] = total_num_examples
lowercase_ : int = total_num_bytes
# should rename everything at the end
logger.debug(f'''Renaming {total_shards} shards.''' )
if total_shards > 1:
lowercase_ : Tuple = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
lowercase_ : Dict = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
lowercase_ : int , lowercase_ : int , lowercase_ : int , ):
rename(
lowercase_ , fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , fpath.replace("""TTTTT-SSSSS""" , f'''{global_shard_id:05d}''' ).replace("""NNNNN""" , f'''{total_shards:05d}''' ) , )
lowercase_ : Union[str, Any] = []
lowercase_ : Tuple = 0
for i in range(len(lowercase_ ) ):
lowercase_ , lowercase_ : List[Any] = task_id_and_num_shards[i]
for shard_id in range(lowercase_ ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(lowercase_ , len(lowercase_ ) ).map(lambda lowercase_ : _rename_shard(*lowercase_ ) ).collect()
else:
# don't use any pattern
lowercase_ : List[str] = 0
lowercase_ : Optional[Any] = task_id_and_num_shards[0][0]
self._rename(
fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , fpath.replace(lowercase_ , """""" ) , )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : "datasets.SplitGenerator" , ):
return SparkExamplesIterable(self.df )
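# Note on the shard naming used above (a clarifying note, not part of the
# original code): each Spark task writes shards whose filenames contain the
# placeholders "TTTTT" (task id) and "SSSSS" (shard id within that task).
# Once every task has reported its (num_examples, num_bytes, num_shards,
# shard_lengths) stats, the shards are renamed to the flat "-SSSSS-of-NNNNN"
# pattern using a global shard index, or the suffix is stripped entirely when
# only a single shard was produced.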
| 30
| 1
|
'''simple docstring'''
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> int:
while a != 0:
lowercase_ , lowercase_ : Any = b % a, a
return b
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> int:
if gcd(UpperCAmelCase__ , UpperCAmelCase__ ) != 1:
lowercase_ : Optional[Any] = F'''mod inverse of {a!r} and {m!r} does not exist'''
raise ValueError(UpperCAmelCase__ )
lowercase_ , lowercase_ , lowercase_ : str = 1, 0, a
lowercase_ , lowercase_ , lowercase_ : Tuple = 0, 1, m
while va != 0:
lowercase_ : List[Any] = ua // va
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ : int = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va
return ua % m
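# A worked example of what the two helpers above compute (a sketch; the names
# are obfuscated in this file, so gcd/mod_inverse below describe the intent of
# the functions rather than callable identifiers):
#   gcd(7, 26) == 1, so the modular inverse of 7 modulo 26 exists.
#   The extended-Euclidean loop returns 15, since 7 * 15 == 105 == 4 * 26 + 1,
#   i.e. (7 * 15) % 26 == 1.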
| 30
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_lowercase : Dict = {
"configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Dict = ["BloomTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Dict = [
"BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
"BloomForCausalLM",
"BloomModel",
"BloomPreTrainedModel",
"BloomForSequenceClassification",
"BloomForTokenClassification",
"BloomForQuestionAnswering",
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
_lowercase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 30
| 1
|
'''simple docstring'''
import sys
def lowerCamelCase ( UpperCAmelCase__ : int ) -> Optional[int]:
lowercase_ : Tuple = len(UpperCAmelCase__ )
lowercase_ : Any = [[0 for x in range(UpperCAmelCase__ )] for x in range(UpperCAmelCase__ )]
lowercase_ : List[str] = [[0 for x in range(UpperCAmelCase__ )] for x in range(UpperCAmelCase__ )]
for chain_length in range(2 , UpperCAmelCase__ ):
for a in range(1 , n - chain_length + 1 ):
lowercase_ : Optional[int] = a + chain_length - 1
lowercase_ : List[str] = sys.maxsize
for c in range(UpperCAmelCase__ , UpperCAmelCase__ ):
lowercase_ : List[str] = (
matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
)
if cost < matrix[a][b]:
lowercase_ : Optional[Any] = cost
lowercase_ : str = c
return matrix, sol
def lowerCamelCase ( UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Dict ) -> str:
if i == j:
print("""A""" + str(UpperCAmelCase__ ) , end=""" """ )
else:
print("""(""" , end=""" """ )
print_optiomal_solution(UpperCAmelCase__ , UpperCAmelCase__ , optimal_solution[i][j] )
print_optiomal_solution(UpperCAmelCase__ , optimal_solution[i][j] + 1 , UpperCAmelCase__ )
print(""")""" , end=""" """ )
def lowerCamelCase ( ) -> Union[str, Any]:
lowercase_ : int = [30, 35, 15, 5, 10, 20, 25]
lowercase_ : Union[str, Any] = len(UpperCAmelCase__ )
# Size of matrix created from above array will be
# 30*35 35*15 15*5 5*10 10*20 20*25
lowercase_ , lowercase_ : str = matrix_chain_order(UpperCAmelCase__ )
print("""No. of Operation required: """ + str(matrix[1][n - 1] ) )
print_optiomal_solution(UpperCAmelCase__ , 1 , n - 1 )
if __name__ == "__main__":
main()
| 30
|
'''simple docstring'''
_lowercase : int = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def lowerCamelCase ( ) -> None:
lowercase_ : List[Any] = input("""Enter message: """ )
lowercase_ : str = input("""Enter key [alphanumeric]: """ )
lowercase_ : List[Any] = input("""Encrypt/Decrypt [e/d]: """ )
if mode.lower().startswith("""e""" ):
lowercase_ : List[str] = """encrypt"""
lowercase_ : Optional[int] = encrypt_message(UpperCAmelCase__ , UpperCAmelCase__ )
elif mode.lower().startswith("""d""" ):
lowercase_ : Any = """decrypt"""
lowercase_ : Optional[Any] = decrypt_message(UpperCAmelCase__ , UpperCAmelCase__ )
print(F'''\n{mode.title()}ed message:''' )
print(UpperCAmelCase__ )
def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : str ) -> str:
return translate_message(UpperCAmelCase__ , UpperCAmelCase__ , """encrypt""" )
def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : str ) -> str:
return translate_message(UpperCAmelCase__ , UpperCAmelCase__ , """decrypt""" )
def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : str , UpperCAmelCase__ : str ) -> str:
lowercase_ : Union[str, Any] = []
lowercase_ : List[Any] = 0
lowercase_ : str = key.upper()
for symbol in message:
lowercase_ : Tuple = LETTERS.find(symbol.upper() )
if num != -1:
if mode == "encrypt":
num += LETTERS.find(key[key_index] )
elif mode == "decrypt":
num -= LETTERS.find(key[key_index] )
num %= len(UpperCAmelCase__ )
if symbol.isupper():
translated.append(LETTERS[num] )
elif symbol.islower():
translated.append(LETTERS[num].lower() )
key_index += 1
if key_index == len(UpperCAmelCase__ ):
lowercase_ : Any = 0
else:
translated.append(UpperCAmelCase__ )
return "".join(UpperCAmelCase__ )
if __name__ == "__main__":
main()
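# A minimal standalone sanity check of the Vigenère shift implemented above
# (a sketch that re-derives the expected ciphertext without relying on the
# obfuscated helper names; "HELLO" and "KEY" are illustrative values only):
_example_letters = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
_example_cipher = "".join(
    _example_letters[(_example_letters.index(m) + _example_letters.index(k)) % 26]
    for m, k in zip("HELLO", "KEYKE")  # key repeated to the message length
)
assert _example_cipher == "RIJVS"  # H+K, E+E, L+Y, L+K, O+E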
| 30
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_lowercase : Any = {
"configuration_conditional_detr": [
"CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ConditionalDetrConfig",
"ConditionalDetrOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : List[Any] = ["ConditionalDetrFeatureExtractor"]
_lowercase : Optional[Any] = ["ConditionalDetrImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : List[Any] = [
"CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConditionalDetrForObjectDetection",
"ConditionalDetrForSegmentation",
"ConditionalDetrModel",
"ConditionalDetrPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
_lowercase : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 30
|
'''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class __magic_name__ ( unittest.TestCase):
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
lowercase_ : List[Any] = """ylacombe/bark-small"""
lowercase_ : List[str] = tempfile.mkdtemp()
lowercase_ : Tuple = """en_speaker_1"""
lowercase_ : Union[str, Any] = """This is a test string"""
lowercase_ : int = """speaker_embeddings_path.json"""
lowercase_ : Any = """speaker_embeddings"""
def SCREAMING_SNAKE_CASE_ ( self : Tuple , **lowercase_ : Optional[int] ):
return AutoTokenizer.from_pretrained(self.checkpoint , **lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : str ):
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
lowercase_ : Any = self.get_tokenizer()
lowercase_ : Optional[Any] = BarkProcessor(tokenizer=lowercase_ )
processor.save_pretrained(self.tmpdirname )
lowercase_ : Union[str, Any] = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
lowercase_ : Any = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
lowercase_ : List[str] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
lowercase_ : Optional[Any] = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
lowercase_ : Optional[int] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
lowercase_ : Optional[int] = 35
lowercase_ : int = 2
lowercase_ : Union[str, Any] = 8
lowercase_ : Union[str, Any] = {
"""semantic_prompt""": np.ones(lowercase_ ),
"""coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ),
"""fine_prompt""": np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
lowercase_ : str = processor(text=self.input_string , voice_preset=lowercase_ )
lowercase_ : Dict = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowercase_ , np.array([] ) ).tolist() )
# test loading voice preset from npz file
lowercase_ : Any = os.path.join(self.tmpdirname , """file.npz""" )
np.savez(lowercase_ , **lowercase_ )
lowercase_ : Optional[Any] = processor(text=self.input_string , voice_preset=lowercase_ )
lowercase_ : List[Any] = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowercase_ , np.array([] ) ).tolist() )
# test loading voice preset from the hub
lowercase_ : Union[str, Any] = processor(text=self.input_string , voice_preset=self.voice_preset )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
lowercase_ : List[str] = self.get_tokenizer()
lowercase_ : int = BarkProcessor(tokenizer=lowercase_ )
lowercase_ : Any = processor(text=self.input_string )
lowercase_ : List[str] = tokenizer(
self.input_string , padding="""max_length""" , max_length=256 , add_special_tokens=lowercase_ , return_attention_mask=lowercase_ , return_token_type_ids=lowercase_ , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 30
| 1
|
'''simple docstring'''
from scipy.stats import pearsonr
import datasets
_lowercase : int = "\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n"
_lowercase : Optional[Any] = "\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric(\"pearsonr\")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results['pearsonr'], 2))\n -0.74\n\n Example 2-The same as Example 1, but also returning the `p-value`.\n >>> pearsonr_metric = datasets.load_metric(\"pearsonr\")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n ['p-value', 'pearsonr']\n >>> print(round(results['pearsonr'], 2))\n -0.74\n >>> print(round(results['p-value'], 2))\n 0.15\n"
_lowercase : Union[str, Any] = "\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class __magic_name__ ( datasets.Metric):
def SCREAMING_SNAKE_CASE_ ( self : Any ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""float""" ),
"""references""": datasets.Value("""float""" ),
} ) , reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"""] , )
def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : Dict , lowercase_ : List[str] , lowercase_ : int=False ):
if return_pvalue:
lowercase_ : Optional[Any] = pearsonr(lowercase_ , lowercase_ )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(lowercase_ , lowercase_ )[0] )}
| 30
|
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=_UpperCAmelCase)
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = field(default='''image-classification''', metadata={'''include_in_asdict_even_if_is_default''': True})
UpperCamelCase__ = Features({'''image''': Image()})
UpperCamelCase__ = Features({'''labels''': ClassLabel})
UpperCamelCase__ = "image"
UpperCamelCase__ = "labels"
def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : str ):
if self.label_column not in features:
raise ValueError(f'''Column {self.label_column} is not present in features.''' )
if not isinstance(features[self.label_column] , lowercase_ ):
raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' )
lowercase_ : List[str] = copy.deepcopy(self )
lowercase_ : List[str] = self.label_schema.copy()
lowercase_ : List[Any] = features[self.label_column]
lowercase_ : Optional[Any] = label_schema
return task_template
@property
def SCREAMING_SNAKE_CASE_ ( self : int ):
return {
self.image_column: "image",
self.label_column: "labels",
}
| 30
| 1
|
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
BaseOutput,
OptionalDependencyNotAvailable,
is_flax_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_onnx_available,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
@dataclass
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = 42
UpperCamelCase__ = 42
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_cycle_diffusion import CycleDiffusionPipeline
from .pipeline_stable_diffusion import StableDiffusionPipeline
from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
from .pipeline_stable_diffusion_imgaimg import StableDiffusionImgaImgPipeline
from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
from .pipeline_stable_diffusion_instruct_pixapix import StableDiffusionInstructPixaPixPipeline
from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
from .pipeline_stable_diffusion_ldmad import StableDiffusionLDMaDPipeline
from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from .pipeline_stable_unclip import StableUnCLIPPipeline
from .pipeline_stable_unclip_imgaimg import StableUnCLIPImgaImgPipeline
from .safety_checker import StableDiffusionSafetyChecker
from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.26.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionPixaPixZeroPipeline,
)
else:
from .pipeline_stable_diffusion_depthaimg import StableDiffusionDepthaImgPipeline
from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
from .pipeline_stable_diffusion_pixapix_zero import StableDiffusionPixaPixZeroPipeline
try:
if not (
is_torch_available()
and is_transformers_available()
and is_k_diffusion_available()
and is_k_diffusion_version(">=", "0.0.12")
):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline
try:
if not (is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_onnx_objects import * # noqa F403
else:
from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
from .pipeline_onnx_stable_diffusion_imgaimg import OnnxStableDiffusionImgaImgPipeline
from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline
if is_transformers_available() and is_flax_available():
import flax
@flax.struct.dataclass
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = 42
UpperCamelCase__ = 42
from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
from .pipeline_flax_stable_diffusion_imgaimg import FlaxStableDiffusionImgaImgPipeline
from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
| 30
|
'''simple docstring'''
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def lowerCamelCase ( UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[Any] ) -> List[Any]:
lowercase_ : str = 1.5
lowercase_ : List[Any] = int(factor * num_class_images )
lowercase_ : int = ClipClient(
url="""https://knn.laion.ai/knn-service""" , indice_name="""laion_400m""" , num_images=UpperCAmelCase__ , aesthetic_weight=0.1 )
os.makedirs(F'''{class_data_dir}/images''' , exist_ok=UpperCAmelCase__ )
if len(list(Path(F'''{class_data_dir}/images''' ).iterdir() ) ) >= num_class_images:
return
while True:
lowercase_ : List[str] = client.query(text=UpperCAmelCase__ )
if len(UpperCAmelCase__ ) >= factor * num_class_images or num_images > 1e4:
break
else:
lowercase_ : List[str] = int(factor * num_images )
lowercase_ : List[str] = ClipClient(
url="""https://knn.laion.ai/knn-service""" , indice_name="""laion_400m""" , num_images=UpperCAmelCase__ , aesthetic_weight=0.1 , )
lowercase_ : List[str] = 0
lowercase_ : Dict = 0
lowercase_ : Tuple = tqdm(desc="""downloading real regularization images""" , total=UpperCAmelCase__ )
with open(F'''{class_data_dir}/caption.txt''' , """w""" ) as fa, open(F'''{class_data_dir}/urls.txt''' , """w""" ) as fa, open(
F'''{class_data_dir}/images.txt''' , """w""" ) as fa:
while total < num_class_images:
lowercase_ : str = class_images[count]
count += 1
try:
lowercase_ : Union[str, Any] = requests.get(images["""url"""] )
if img.status_code == 200:
lowercase_ : List[str] = Image.open(BytesIO(img.content ) )
with open(F'''{class_data_dir}/images/{total}.jpg''' , """wb""" ) as f:
f.write(img.content )
fa.write(images["""caption"""] + """\n""" )
fa.write(images["""url"""] + """\n""" )
fa.write(F'''{class_data_dir}/images/{total}.jpg''' + """\n""" )
total += 1
pbar.update(1 )
else:
continue
except Exception:
continue
return
def lowerCamelCase ( ) -> Optional[Any]:
lowercase_ : Any = argparse.ArgumentParser("""""" , add_help=UpperCAmelCase__ )
parser.add_argument("""--class_prompt""" , help="""text prompt to retrieve images""" , required=UpperCAmelCase__ , type=UpperCAmelCase__ )
parser.add_argument("""--class_data_dir""" , help="""path to save images""" , required=UpperCAmelCase__ , type=UpperCAmelCase__ )
parser.add_argument("""--num_class_images""" , help="""number of images to download""" , default=200 , type=UpperCAmelCase__ )
return parser.parse_args()
if __name__ == "__main__":
_lowercase : Dict = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 30
| 1
|
'''simple docstring'''
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCamelCase ( UpperCAmelCase__ : Tuple , UpperCAmelCase__ : str , UpperCAmelCase__ : List[Any] ) -> Optional[Any]:
return params[F'''{prefix}/{prefix}/relpos_bias/rel_embedding'''][:, i, :]
def lowerCamelCase ( UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : int="attention" ) -> Tuple:
lowercase_ : List[str] = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/key/kernel'''][:, i, :, :] )
lowercase_ : List[Any] = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] )
lowercase_ : Any = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/out/kernel'''][:, i, :, :] )
lowercase_ : List[Any] = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] )
lowercase_ : Union[str, Any] = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/query/kernel'''][:, i, :, :] )
lowercase_ : List[str] = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] )
lowercase_ : List[str] = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/value/kernel'''][:, i, :, :] )
lowercase_ : str = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] )
return k, o, q, v
def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : str=False ) -> List[Any]:
if split_mlp_wi:
lowercase_ : List[Any] = params[F'''{prefix}/{prefix}/mlp/wi_0/kernel'''][:, i, :]
lowercase_ : Tuple = params[F'''{prefix}/{prefix}/mlp/wi_1/kernel'''][:, i, :]
lowercase_ : str = (wi_a, wi_a)
else:
lowercase_ : str = params[F'''{prefix}/{prefix}/mlp/wi/kernel'''][:, i, :]
lowercase_ : List[str] = params[F'''{prefix}/{prefix}/mlp/wo/kernel'''][:, i, :]
return wi, wo
def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[int] ) -> int:
return params[F'''{prefix}/{prefix}/{layer_name}/scale'''][:, i]
def lowerCamelCase ( UpperCAmelCase__ : dict , *, UpperCAmelCase__ : int , UpperCAmelCase__ : bool , UpperCAmelCase__ : bool = False ) -> str:
lowercase_ : int = traverse_util.flatten_dict(variables["""target"""] )
lowercase_ : Dict = {"""/""".join(UpperCAmelCase__ ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
lowercase_ : Tuple = """encoder/encoder/mlp/wi_0/kernel""" in old
print("""Split MLP:""" , UpperCAmelCase__ )
lowercase_ : List[Any] = collections.OrderedDict()
# Shared embeddings.
lowercase_ : Tuple = old["""token_embedder/embedding"""]
# Encoder.
for i in range(UpperCAmelCase__ ):
# Block i, layer 0 (Self Attention).
lowercase_ : List[str] = tax_layer_norm_lookup(UpperCAmelCase__ , UpperCAmelCase__ , """encoder""" , """pre_attention_layer_norm""" )
lowercase_ , lowercase_ , lowercase_ , lowercase_ : int = tax_attention_lookup(UpperCAmelCase__ , UpperCAmelCase__ , """encoder""" , """attention""" )
lowercase_ : Optional[Any] = layer_norm
lowercase_ : Any = k.T
lowercase_ : str = o.T
lowercase_ : Union[str, Any] = q.T
lowercase_ : Optional[Any] = v.T
# Block i, layer 1 (MLP).
lowercase_ : Any = tax_layer_norm_lookup(UpperCAmelCase__ , UpperCAmelCase__ , """encoder""" , """pre_mlp_layer_norm""" )
lowercase_ , lowercase_ : Dict = tax_mlp_lookup(UpperCAmelCase__ , UpperCAmelCase__ , """encoder""" , UpperCAmelCase__ )
lowercase_ : Tuple = layer_norm
if split_mlp_wi:
lowercase_ : Tuple = wi[0].T
lowercase_ : Optional[Any] = wi[1].T
else:
lowercase_ : Optional[Any] = wi.T
lowercase_ : Any = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
lowercase_ : Dict = tax_relpos_bias_lookup(
UpperCAmelCase__ , UpperCAmelCase__ , """encoder""" ).T
lowercase_ : Dict = old["""encoder/encoder_norm/scale"""]
if not scalable_attention:
lowercase_ : Dict = tax_relpos_bias_lookup(
UpperCAmelCase__ , 0 , """encoder""" ).T
lowercase_ : List[Any] = tax_relpos_bias_lookup(
UpperCAmelCase__ , 0 , """decoder""" ).T
if not is_encoder_only:
# Decoder.
for i in range(UpperCAmelCase__ ):
# Block i, layer 0 (Self Attention).
lowercase_ : List[Any] = tax_layer_norm_lookup(UpperCAmelCase__ , UpperCAmelCase__ , """decoder""" , """pre_self_attention_layer_norm""" )
lowercase_ , lowercase_ , lowercase_ , lowercase_ : str = tax_attention_lookup(UpperCAmelCase__ , UpperCAmelCase__ , """decoder""" , """self_attention""" )
lowercase_ : Tuple = layer_norm
lowercase_ : Optional[int] = k.T
lowercase_ : str = o.T
lowercase_ : List[Any] = q.T
lowercase_ : List[str] = v.T
# Block i, layer 1 (Cross Attention).
lowercase_ : Tuple = tax_layer_norm_lookup(UpperCAmelCase__ , UpperCAmelCase__ , """decoder""" , """pre_cross_attention_layer_norm""" )
lowercase_ , lowercase_ , lowercase_ , lowercase_ : Union[str, Any] = tax_attention_lookup(UpperCAmelCase__ , UpperCAmelCase__ , """decoder""" , """encoder_decoder_attention""" )
lowercase_ : Union[str, Any] = layer_norm
lowercase_ : Union[str, Any] = k.T
lowercase_ : str = o.T
lowercase_ : Dict = q.T
lowercase_ : Union[str, Any] = v.T
# Block i, layer 2 (MLP).
lowercase_ : Any = tax_layer_norm_lookup(UpperCAmelCase__ , UpperCAmelCase__ , """decoder""" , """pre_mlp_layer_norm""" )
lowercase_ , lowercase_ : Tuple = tax_mlp_lookup(UpperCAmelCase__ , UpperCAmelCase__ , """decoder""" , UpperCAmelCase__ )
lowercase_ : Optional[int] = layer_norm
if split_mlp_wi:
lowercase_ : Tuple = wi[0].T
lowercase_ : Optional[Any] = wi[1].T
else:
lowercase_ : Any = wi.T
lowercase_ : str = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
lowercase_ : Tuple = tax_relpos_bias_lookup(UpperCAmelCase__ , UpperCAmelCase__ , """decoder""" ).T
lowercase_ : List[Any] = old["""decoder/decoder_norm/scale"""]
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
lowercase_ : Tuple = old["""decoder/logits_dense/kernel"""].T
return new
def lowerCamelCase ( UpperCAmelCase__ : Tuple , UpperCAmelCase__ : bool ) -> Dict:
lowercase_ : Dict = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
lowercase_ : Tuple = state_dict["""shared.weight"""]
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
lowercase_ : Union[str, Any] = state_dict["""shared.weight"""]
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print("""Using shared word embeddings as lm_head.""" )
lowercase_ : Any = state_dict["""shared.weight"""]
return state_dict
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] ) -> Dict:
lowercase_ : Union[str, Any] = checkpoints.load_tax_checkpoint(UpperCAmelCase__ )
lowercase_ : Tuple = convert_tax_to_pytorch(
UpperCAmelCase__ , num_layers=config.num_layers , is_encoder_only=UpperCAmelCase__ , scalable_attention=UpperCAmelCase__ )
lowercase_ : int = make_state_dict(UpperCAmelCase__ , UpperCAmelCase__ )
model.load_state_dict(UpperCAmelCase__ , strict=UpperCAmelCase__ )
def lowerCamelCase ( UpperCAmelCase__ : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : bool = False , ) -> Union[str, Any]:
lowercase_ : Any = MTaConfig.from_json_file(UpperCAmelCase__ )
print(F'''Building PyTorch model from configuration: {config}''' )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
lowercase_ : Any = UMTaEncoderModel(UpperCAmelCase__ )
else:
lowercase_ : Any = UMTaForConditionalGeneration(UpperCAmelCase__ )
# Load weights from tf checkpoint
load_tax_weights_in_ta(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(UpperCAmelCase__ )
# Verify that we can load the checkpoint.
model.from_pretrained(UpperCAmelCase__ )
print("""Done""" )
if __name__ == "__main__":
_lowercase : Tuple = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
# Required parameters
parser.add_argument(
"--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False
)
parser.add_argument(
"--scalable_attention",
action="store_true",
help="Whether the model uses scaled attention (umt5 model)",
default=False,
)
_lowercase : Any = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.tax_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
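# Note on the kernel transposes above (a clarifying note, not part of the
# original code): T5X/Flax dense kernels are stored as (in_features, out_features),
# while PyTorch nn.Linear weights are (out_features, in_features), hence the .T
# applied to every q/k/v/o and wi/wo lookup; v1.1-style checkpoints additionally
# split the gated-GeLU input projection into wi_0 and wi_1.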
| 30
|
'''simple docstring'''
from __future__ import annotations
def lowerCamelCase ( UpperCAmelCase__ : list , UpperCAmelCase__ : int | None = None , UpperCAmelCase__ : int | None = None ) -> None:
if start is None:
lowercase_ : Any = 0
if end is None:
lowercase_ : List[Any] = len(UpperCAmelCase__ ) - 1
if start >= end:
return
lowercase_ : Optional[int] = (start + end) // 2
slowsort(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
slowsort(UpperCAmelCase__ , mid + 1 , UpperCAmelCase__ )
if sequence[end] < sequence[mid]:
lowercase_ , lowercase_ : Dict = sequence[mid], sequence[end]
slowsort(UpperCAmelCase__ , UpperCAmelCase__ , end - 1 )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 30
| 1
|
'''simple docstring'''
from __future__ import annotations
_lowercase : Union[str, Any] = list[list[int]]
# assigning initial values to the grid
_lowercase : Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
_lowercase : Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def lowerCamelCase ( UpperCAmelCase__ : Matrix , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> bool:
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def lowerCamelCase ( UpperCAmelCase__ : Matrix ) -> tuple[int, int] | None:
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def lowerCamelCase ( UpperCAmelCase__ : Matrix ) -> Matrix | None:
if location := find_empty_location(UpperCAmelCase__ ):
lowercase_ , lowercase_ : Optional[int] = location
else:
# If the location is ``None``, then the grid is solved.
return grid
for digit in range(1 , 10 ):
if is_safe(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
lowercase_ : Dict = digit
if sudoku(UpperCAmelCase__ ) is not None:
return grid
lowercase_ : List[Any] = 0
return None
def lowerCamelCase ( UpperCAmelCase__ : Matrix ) -> None:
for row in grid:
for cell in row:
print(UpperCAmelCase__ , end=""" """ )
print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print("\nExample grid:\n" + "=" * 20)
print_solution(example_grid)
print("\nExample grid solution:")
_lowercase : int = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("Cannot find a solution.")
| 30
|
'''simple docstring'''
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
_lowercase : Optional[Any] = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
_lowercase : Dict = parser.parse_args()
_lowercase : Dict = "cpu"
_lowercase : str = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"
_lowercase : Any = "path-to-your-trained-model"
_lowercase : str = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
_lowercase : Optional[int] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
_lowercase : Any = pipe.to(device)
# to channels last
_lowercase : Union[str, Any] = pipe.unet.to(memory_format=torch.channels_last)
_lowercase : List[Any] = pipe.vae.to(memory_format=torch.channels_last)
_lowercase : Union[str, Any] = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
_lowercase : Tuple = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
_lowercase : int = torch.randn(2, 4, 64, 64)
_lowercase : int = torch.rand(1) * 999
_lowercase : Union[str, Any] = torch.randn(2, 77, 768)
_lowercase : Optional[Any] = (sample, timestep, encoder_hidden_status)
try:
_lowercase : int = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example)
except Exception:
_lowercase : Optional[int] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True)
_lowercase : List[Any] = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True)
_lowercase : str = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True)
if pipe.requires_safety_checker:
_lowercase : int = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True)
# compute
_lowercase : int = 666
_lowercase : Any = torch.Generator(device).manual_seed(seed)
_lowercase : int = {"generator": generator}
if args.steps is not None:
_lowercase : Optional[int] = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa):
_lowercase : List[Any] = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save("generated.png")
| 30
| 1
|
'''simple docstring'''
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_lowercase : Dict = object()
# For specifying empty leaf dict `{}`
_lowercase : Any = object()
def lowerCamelCase ( UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[str] ) -> Dict:
lowercase_ : str = tuple((re.compile(x + """$""" ) for x in qs) )
for i in range(len(UpperCAmelCase__ ) - len(UpperCAmelCase__ ) + 1 ):
lowercase_ : Any = [x.match(UpperCAmelCase__ ) for x, y in zip(UpperCAmelCase__ , ks[i:] )]
if matches and all(UpperCAmelCase__ ):
return True
return False
def lowerCamelCase ( UpperCAmelCase__ : Optional[Any] ) -> List[Any]:
def replace(UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Any ):
for rule, replacement in rules:
if _match(UpperCAmelCase__ , UpperCAmelCase__ ):
return replacement
return val
return replace
def lowerCamelCase ( ) -> Union[str, Any]:
return [
# embeddings
(("transformer", "wpe", "embedding"), P("""mp""" , UpperCAmelCase__ )),
(("transformer", "wte", "embedding"), P("""mp""" , UpperCAmelCase__ )),
# attention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(UpperCAmelCase__ , """mp""" )),
(("attention", "out_proj", "kernel"), P("""mp""" , UpperCAmelCase__ )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(UpperCAmelCase__ , """mp""" )),
(("mlp", "c_fc", "bias"), P("""mp""" )),
(("mlp", "c_proj", "kernel"), P("""mp""" , UpperCAmelCase__ )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
def lowerCamelCase ( UpperCAmelCase__ : List[str] ) -> str:
lowercase_ : Dict = _get_partition_rules()
lowercase_ : Optional[int] = _replacement_rules(UpperCAmelCase__ )
lowercase_ : Dict = {k: _unmatched for k in flatten_dict(UpperCAmelCase__ )}
lowercase_ : Tuple = {k: replace(UpperCAmelCase__ , UpperCAmelCase__ ) for k, v in initd.items()}
assert _unmatched not in result.values(), "Incomplete partition spec."
return freeze(unflatten_dict(UpperCAmelCase__ ) )
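# Note (not part of the original code): the rules above map GPT-2-style
# parameter paths to jax PartitionSpec values, sharding embedding and
# attention/MLP projection kernels along the "mp" (model-parallel) axis while
# leaving biases and layer-norm scales unpartitioned (None).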
| 30
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_lowercase : Optional[Any] = {
"configuration_swiftformer": [
"SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SwiftFormerConfig",
"SwiftFormerOnnxConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Any = [
"SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"SwiftFormerForImageClassification",
"SwiftFormerModel",
"SwiftFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
_lowercase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 30
| 1
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
_lowercase : str = logging.get_logger(__name__)
class __magic_name__ ( _UpperCAmelCase):
def __init__( self : Optional[int] , *lowercase_ : Any , **lowercase_ : List[Any] ):
warnings.warn(
"""The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use DeformableDetrImageProcessor instead.""" , lowercase_ , )
super().__init__(*lowercase_ , **lowercase_ )
| 30
|
'''simple docstring'''
import unittest
import numpy as np
def lowerCamelCase ( UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : np.ndarray | None = None , ) -> np.ndarray:
lowercase_ : List[Any] = np.shape(UpperCAmelCase__ )
lowercase_ : Dict = np.shape(UpperCAmelCase__ )
lowercase_ : int = np.shape(UpperCAmelCase__ )
if shape_a[0] != shape_b[0]:
lowercase_ : Optional[int] = (
"""Expected the same number of rows for A and B. """
F'''Instead found A of size {shape_a} and B of size {shape_b}'''
)
raise ValueError(UpperCAmelCase__ )
if shape_b[1] != shape_c[1]:
lowercase_ : Optional[Any] = (
"""Expected the same number of columns for B and C. """
F'''Instead found B of size {shape_b} and C of size {shape_c}'''
)
raise ValueError(UpperCAmelCase__ )
lowercase_ : Any = pseudo_inv
if a_inv is None:
try:
lowercase_ : List[str] = np.linalg.inv(UpperCAmelCase__ )
except np.linalg.LinAlgError:
raise ValueError(
"""Input matrix A is not invertible. Cannot compute Schur complement.""" )
return mat_c - mat_b.T @ a_inv @ mat_b
class __magic_name__ ( unittest.TestCase):
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
lowercase_ : Tuple = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
lowercase_ : int = np.array([[0, 3], [3, 0], [2, 3]] )
lowercase_ : Dict = np.array([[2, 1], [6, 3]] )
lowercase_ : Union[str, Any] = schur_complement(lowercase_ , lowercase_ , lowercase_ )
lowercase_ : List[Any] = np.block([[a, b], [b.T, c]] )
lowercase_ : Optional[int] = np.linalg.det(lowercase_ )
lowercase_ : int = np.linalg.det(lowercase_ )
lowercase_ : int = np.linalg.det(lowercase_ )
self.assertAlmostEqual(lowercase_ , det_a * det_s )
def SCREAMING_SNAKE_CASE_ ( self : str ):
lowercase_ : int = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
lowercase_ : Optional[Any] = np.array([[0, 3], [3, 0], [2, 3]] )
lowercase_ : Union[str, Any] = np.array([[2, 1], [6, 3]] )
with self.assertRaises(lowercase_ ):
schur_complement(lowercase_ , lowercase_ , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ : List[Any] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
lowercase_ : List[Any] = np.array([[0, 3], [3, 0], [2, 3]] )
lowercase_ : str = np.array([[2, 1, 3], [6, 3, 5]] )
with self.assertRaises(lowercase_ ):
schur_complement(lowercase_ , lowercase_ , lowercase_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
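# The identity exercised in the first test above, stated explicitly (a note,
# not part of the original code): for the block matrix M = [[A, B], [B.T, C]],
# the Schur complement of A is S = C - B.T @ inv(A) @ B, and
# det(M) == det(A) * det(S), which is exactly what assertAlmostEqual verifies.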
| 30
| 1
|
'''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
_lowercase : Optional[Any] = [
"good first issue",
"feature request",
"wip",
]
def lowerCamelCase ( ) -> List[str]:
lowercase_ : List[Any] = Github(os.environ["""GITHUB_TOKEN"""] )
lowercase_ : Optional[Any] = g.get_repo("""huggingface/accelerate""" )
lowercase_ : Any = repo.get_issues(state="""open""" )
for issue in open_issues:
lowercase_ : str = sorted([comment for comment in issue.get_comments()] , key=lambda UpperCAmelCase__ : i.created_at , reverse=UpperCAmelCase__ )
lowercase_ : Optional[int] = comments[0] if len(UpperCAmelCase__ ) > 0 else None
lowercase_ : Any = dt.utcnow()
lowercase_ : List[str] = (current_time - issue.updated_at).days
lowercase_ : str = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state="""closed""" )
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
if __name__ == "__main__":
main()
| 30
|
'''simple docstring'''
_lowercase : int = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
def lowerCamelCase ( UpperCAmelCase__ : bytes ) -> bytes:
# Make sure the supplied data is a bytes-like object
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
lowercase_ : Union[str, Any] = F'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
raise TypeError(UpperCAmelCase__ )
lowercase_ : Dict = """""".join(bin(UpperCAmelCase__ )[2:].zfill(8 ) for byte in data )
lowercase_ : Union[str, Any] = len(UpperCAmelCase__ ) % 6 != 0
if padding_needed:
# The padding that will be added later
lowercase_ : List[Any] = b"""=""" * ((6 - len(UpperCAmelCase__ ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(UpperCAmelCase__ ) % 6)
else:
lowercase_ : Union[str, Any] = b""""""
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(UpperCAmelCase__ ) , 6 ) ).encode()
+ padding
)
def lowerCamelCase ( UpperCAmelCase__ : str ) -> bytes:
# Make sure encoded_data is either a string or a bytes-like object
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) and not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
lowercase_ : List[str] = (
"""argument should be a bytes-like object or ASCII string, """
F'''not \'{encoded_data.__class__.__name__}\''''
)
raise TypeError(UpperCAmelCase__ )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
try:
lowercase_ : Optional[int] = encoded_data.decode("""utf-8""" )
except UnicodeDecodeError:
raise ValueError("""base64 encoded data should only contain ASCII characters""" )
lowercase_ : Any = encoded_data.count("""=""" )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(UpperCAmelCase__ ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
lowercase_ : Optional[int] = encoded_data[:-padding]
lowercase_ : Any = """""".join(
bin(B64_CHARSET.index(UpperCAmelCase__ ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
lowercase_ : int = """""".join(
bin(B64_CHARSET.index(UpperCAmelCase__ ) )[2:].zfill(6 ) for char in encoded_data )
lowercase_ : Optional[int] = [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(UpperCAmelCase__ ) , 8 )
]
return bytes(UpperCAmelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
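# A minimal cross-check sketch (not part of the original code): for ASCII
# input the encoder above is meant to agree with the standard library; the
# expected value below is worked out by hand, "hello" -> "aGVsbG8=".
import base64
assert base64.b64encode(b"hello") == b"aGVsbG8="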
| 30
| 1
|
'''simple docstring'''
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 30
|
'''simple docstring'''
import argparse
_lowercase : Optional[int] = "docs/source/_static/js/custom.js"
def lowerCamelCase ( UpperCAmelCase__ : Tuple ) -> Dict:
with open(UpperCAmelCase__ , encoding="""utf-8""" , newline="""\n""" ) as f:
lowercase_ : Optional[int] = f.readlines()
lowercase_ : Tuple = 0
# First let's put the right version
while not lines[index].startswith("""const stableVersion =""" ):
index += 1
lowercase_ : Optional[Any] = F'''const stableVersion = "v{version}"\n'''
# Then update the dictionary
while not lines[index].startswith("""const versionMapping = {""" ):
index += 1
# We go until the end
while not lines[index].startswith("""}""" ):
index += 1
# We add the new version at the end
lines[index - 1] += F''' "v{version}": "v{version}",\n'''
with open(UpperCAmelCase__ , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(UpperCAmelCase__ )
if __name__ == "__main__":
_lowercase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("--version", help="Release version.")
_lowercase : Dict = parser.parse_args()
update_custom_js(args.version)
| 30
| 1
|
'''simple docstring'''
import numpy as np
import datasets
_lowercase : Union[str, Any] = "\nCompute the Mahalanobis Distance\n\nMahalanobis distance is the distance between a point and a distribution,\nnot between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since.\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n"
_lowercase : List[Any] = "\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n"
_lowercase : Any = "\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalanobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric(\"mahalanobis\")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {'mahalanobis': array([0.5])}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class __magic_name__ ( datasets.Metric):
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""X""": datasets.Sequence(datasets.Value("""float""" , id="""sequence""" ) , id="""X""" ),
} ) , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : Tuple , lowercase_ : Union[str, Any] ):
# convert to numpy arrays
lowercase_ : int = np.array(lowercase_ )
lowercase_ : Dict = np.array(lowercase_ )
# Assert that arrays are 2D
if len(X.shape ) != 2:
raise ValueError("""Expected `X` to be a 2D vector""" )
if len(reference_distribution.shape ) != 2:
raise ValueError("""Expected `reference_distribution` to be a 2D vector""" )
if reference_distribution.shape[0] < 2:
raise ValueError(
"""Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension""" )
# Get mahalanobis distance for each prediction
lowercase_ : Optional[Any] = X - np.mean(lowercase_ )
lowercase_ : Optional[Any] = np.cov(reference_distribution.T )
try:
lowercase_ : int = np.linalg.inv(lowercase_ )
except np.linalg.LinAlgError:
lowercase_ : str = np.linalg.pinv(lowercase_ )
lowercase_ : Tuple = np.dot(lowercase_ , lowercase_ )
lowercase_ : int = np.dot(lowercase_ , X_minus_mu.T ).diagonal()
return {"mahalanobis": mahal_dist}
'''simple docstring'''
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class __magic_name__ :
def __init__( self : Tuple , lowercase_ : Tuple , lowercase_ : Any=3 , lowercase_ : int=32 , lowercase_ : str=3 , lowercase_ : int=10 , lowercase_ : Any=[8, 16, 32, 64] , lowercase_ : Tuple=[1, 1, 2, 1] , lowercase_ : Any=True , lowercase_ : int=True , lowercase_ : Any="relu" , lowercase_ : List[Any]=3 , lowercase_ : Tuple=None , lowercase_ : Union[str, Any]=["stage2", "stage3", "stage4"] , lowercase_ : Optional[int]=[2, 3, 4] , lowercase_ : List[str]=1 , ):
lowercase_ : Any = parent
lowercase_ : str = batch_size
lowercase_ : Any = image_size
lowercase_ : Optional[Any] = num_channels
lowercase_ : Any = embeddings_size
lowercase_ : Union[str, Any] = hidden_sizes
lowercase_ : Any = depths
lowercase_ : Dict = is_training
lowercase_ : Tuple = use_labels
lowercase_ : str = hidden_act
lowercase_ : Optional[Any] = num_labels
lowercase_ : Tuple = scope
lowercase_ : Any = len(lowercase_ )
lowercase_ : Optional[Any] = out_features
lowercase_ : Tuple = out_indices
lowercase_ : str = num_groups
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
lowercase_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase_ : List[Any] = None
if self.use_labels:
lowercase_ : List[str] = ids_tensor([self.batch_size] , self.num_labels )
lowercase_ : int = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : int , lowercase_ : List[str] , lowercase_ : List[str] ):
lowercase_ : Optional[int] = BitModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ : List[Any] = model(lowercase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : Tuple , lowercase_ : Optional[int] , lowercase_ : List[Any] ):
lowercase_ : Union[str, Any] = self.num_labels
lowercase_ : Tuple = BitForImageClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ : Any = model(lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : List[Any] , lowercase_ : Optional[int] ):
lowercase_ : Any = BitBackbone(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ : Dict = model(lowercase_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
lowercase_ : List[str] = None
lowercase_ : Dict = BitBackbone(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ : Tuple = model(lowercase_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ : Optional[int] = self.prepare_config_and_inputs()
lowercase_ , lowercase_ , lowercase_ : Optional[Any] = config_and_inputs
lowercase_ : Any = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __magic_name__ ( _UpperCAmelCase, _UpperCAmelCase, unittest.TestCase):
UpperCamelCase__ = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
UpperCamelCase__ = (
{'''feature-extraction''': BitModel, '''image-classification''': BitForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
def SCREAMING_SNAKE_CASE_ ( self : str ):
lowercase_ : int = BitModelTester(self )
lowercase_ : Optional[int] = ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
return
@unittest.skip(reason="""Bit does not output attentions""" )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
pass
@unittest.skip(reason="""Bit does not use inputs_embeds""" )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
pass
@unittest.skip(reason="""Bit does not support input and output embeddings""" )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
pass
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
lowercase_ , lowercase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : Optional[Any] = model_class(lowercase_ )
lowercase_ : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ : Union[str, Any] = [*signature.parameters.keys()]
lowercase_ : Optional[int] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
lowercase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ , lowercase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : List[Any] = model_class(config=lowercase_ )
for name, module in model.named_modules():
if isinstance(lowercase_ , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
def check_hidden_states_output(lowercase_ : Optional[int] , lowercase_ : int , lowercase_ : int ):
lowercase_ : Optional[Any] = model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
with torch.no_grad():
lowercase_ : List[Any] = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
lowercase_ : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowercase_ : Optional[int] = self.model_tester.num_stages
self.assertEqual(len(lowercase_ ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowercase_ , lowercase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ : Dict = ["""preactivation""", """bottleneck"""]
for model_class in self.all_model_classes:
for layer_type in layers_type:
lowercase_ : Union[str, Any] = layer_type
lowercase_ : Optional[Any] = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase_ : Union[str, Any] = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
@unittest.skip(reason="""Bit does not use feedforward chunking""" )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
pass
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
lowercase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase_ )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ : List[str] = BitModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
def lowerCamelCase ( ) -> Optional[Any]:
lowercase_ : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class __magic_name__ ( unittest.TestCase):
@cached_property
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def SCREAMING_SNAKE_CASE_ ( self : int ):
lowercase_ : List[str] = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(lowercase_ )
lowercase_ : int = self.default_image_processor
lowercase_ : List[Any] = prepare_img()
lowercase_ : Dict = image_processor(images=lowercase_ , return_tensors="""pt""" ).to(lowercase_ )
# forward pass
with torch.no_grad():
lowercase_ : str = model(**lowercase_ )
# verify the logits
lowercase_ : Optional[int] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowercase_ )
lowercase_ : Union[str, Any] = torch.tensor([[-0.65_26, -0.52_63, -1.43_98]] ).to(lowercase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase_ , atol=1E-4 ) )
@require_torch
class __magic_name__ ( _UpperCAmelCase, unittest.TestCase):
UpperCamelCase__ = (BitBackbone,) if is_torch_available() else ()
UpperCamelCase__ = BitConfig
UpperCamelCase__ = False
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
lowercase_ : Union[str, Any] = BitModelTester(self )
'''simple docstring'''
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class __magic_name__ ( _UpperCAmelCase):
def __init__( self : Tuple , lowercase_ : Distribution , lowercase_ : Tuple=None , lowercase_ : List[str]=None , lowercase_ : int=0 ):
lowercase_ : Union[str, Any] = 1.0 if scale is None else scale
lowercase_ : List[Any] = 0.0 if loc is None else loc
super().__init__(lowercase_ , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=lowercase_ )] )
@property
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
return self.base_dist.mean * self.scale + self.loc
@property
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
return self.base_dist.variance * self.scale**2
@property
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
return self.variance.sqrt()
class __magic_name__ ( nn.Module):
def __init__( self : Union[str, Any] , lowercase_ : int , lowercase_ : Dict[str, int] , lowercase_ : Callable[..., Tuple[torch.Tensor]] , **lowercase_ : List[Any] ):
super().__init__(**lowercase_ )
lowercase_ : List[Any] = args_dim
lowercase_ : Tuple = nn.ModuleList([nn.Linear(lowercase_ , lowercase_ ) for dim in args_dim.values()] )
lowercase_ : int = domain_map
def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : torch.Tensor ):
lowercase_ : Any = [proj(lowercase_ ) for proj in self.proj]
return self.domain_map(*lowercase_ )
class __magic_name__ ( nn.Module):
def __init__( self : Optional[Any] , lowercase_ : List[Any] ):
super().__init__()
lowercase_ : Dict = function
def SCREAMING_SNAKE_CASE_ ( self : Any , lowercase_ : Tuple , *lowercase_ : Optional[int] ):
return self.function(lowercase_ , *lowercase_ )
class __magic_name__ :
UpperCamelCase__ = 42
UpperCamelCase__ = 42
UpperCamelCase__ = 42
def __init__( self : Dict , lowercase_ : int = 1 ):
lowercase_ : str = dim
lowercase_ : int = {k: dim * self.args_dim[k] for k in self.args_dim}
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : Optional[Any] ):
if self.dim == 1:
return self.distribution_class(*lowercase_ )
else:
return Independent(self.distribution_class(*lowercase_ ) , 1 )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : str , lowercase_ : Optional[torch.Tensor] = None , lowercase_ : Optional[torch.Tensor] = None , ):
lowercase_ : str = self._base_distribution(lowercase_ )
if loc is None and scale is None:
return distr
else:
return AffineTransformed(lowercase_ , loc=lowercase_ , scale=lowercase_ , event_dim=self.event_dim )
@property
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
return () if self.dim == 1 else (self.dim,)
@property
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
return len(self.event_shape )
@property
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
return 0.0
def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : int ):
return ParameterProjection(
in_features=lowercase_ , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , *lowercase_ : torch.Tensor ):
raise NotImplementedError()
@staticmethod
def SCREAMING_SNAKE_CASE_ ( lowercase_ : torch.Tensor ):
return (x + torch.sqrt(torch.square(lowercase_ ) + 4.0 )) / 2.0
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = {"df": 1, "loc": 1, "scale": 1}
UpperCamelCase__ = StudentT
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[Any] , lowercase_ : torch.Tensor , lowercase_ : torch.Tensor , lowercase_ : torch.Tensor ):
lowercase_ : List[str] = cls.squareplus(lowercase_ ).clamp_min(torch.finfo(scale.dtype ).eps )
lowercase_ : Optional[Any] = 2.0 + cls.squareplus(lowercase_ )
return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = {"loc": 1, "scale": 1}
UpperCamelCase__ = Normal
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : int , lowercase_ : torch.Tensor , lowercase_ : torch.Tensor ):
lowercase_ : Any = cls.squareplus(lowercase_ ).clamp_min(torch.finfo(scale.dtype ).eps )
return loc.squeeze(-1 ), scale.squeeze(-1 )
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = {"total_count": 1, "logits": 1}
UpperCamelCase__ = NegativeBinomial
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Optional[int] , lowercase_ : torch.Tensor , lowercase_ : torch.Tensor ):
lowercase_ : int = cls.squareplus(lowercase_ )
return total_count.squeeze(-1 ), logits.squeeze(-1 )
def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : Tuple ):
lowercase_ , lowercase_ : Any = distr_args
if self.dim == 1:
return self.distribution_class(total_count=lowercase_ , logits=lowercase_ )
else:
return Independent(self.distribution_class(total_count=lowercase_ , logits=lowercase_ ) , 1 )
def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : Optional[int] , lowercase_ : Optional[torch.Tensor] = None , lowercase_ : Optional[torch.Tensor] = None ):
lowercase_ , lowercase_ : Union[str, Any] = distr_args
if scale is not None:
# See scaling property of Gamma.
logits += scale.log()
return self._base_distribution((total_count, logits) )
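# A minimal, self-contained sketch of the parameter-projection pattern the output
# heads above implement (the feature and batch sizes below are illustrative
# assumptions): one linear projection per distribution argument, a "squareplus"
# domain map that keeps the scale strictly positive, and an affine loc/scale
# rescaling of the resulting distribution, in the spirit of AffineTransformed.
if __name__ == "__main__":
    in_features, batch = 16, 4
    features = torch.randn(batch, in_features)
    proj_loc, proj_scale = nn.Linear(in_features, 1), nn.Linear(in_features, 1)
    loc = proj_loc(features).squeeze(-1)
    raw_scale = proj_scale(features).squeeze(-1)
    scale = (raw_scale + torch.sqrt(torch.square(raw_scale) + 4.0)) / 2.0  # squareplus: smooth and positive
    base = Normal(loc, scale.clamp_min(torch.finfo(scale.dtype).eps))
    rescaled = TransformedDistribution(base, [AffineTransform(loc=2.0, scale=3.0)])
    print(rescaled.sample().shape)  # torch.Size([4])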
'''simple docstring'''
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
_lowercase : Optional[Any] = [
"Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the"
" final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe"
" depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.",
"The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal"
" accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's"
" founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the"
" body.",
"Amnesty International releases its annual report on the death penalty. The report catalogs the use of"
" state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the"
" world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital"
" punishment.",
]
_lowercase : List[Any] = [
"Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ."
" Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz"
" had informed his Lufthansa training school of an episode of severe depression, airline says .",
"Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ."
" Israel and the United States opposed the move, which could open the door to war crimes investigations against"
" Israelis .",
"Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to"
" death . Organization claims that governments around the world are using the threat of terrorism to advance"
" executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death"
" sentences up by 28% .",
]
def lowerCamelCase ( ) -> List[str]:
lowercase_ : str = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , bootstrap_aggregation=UpperCAmelCase__ , rouge_keys=["""rouge2""", """rougeL"""] )
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase_ : int = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , bootstrap_aggregation=UpperCAmelCase__ , rouge_keys=["""rouge2"""] )
assert (
pd.DataFrame(no_aggregation["""rouge2"""] ).fmeasure.mean()
== pd.DataFrame(no_aggregation_just_ra["""rouge2"""] ).fmeasure.mean()
)
def lowerCamelCase ( ) -> Optional[Any]:
lowercase_ : Tuple = """rougeLsum"""
lowercase_ : Optional[Any] = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=[k] )[k]
lowercase_ : Optional[Any] = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=[k] )[k]
assert score > score_no_sep
def lowerCamelCase ( ) -> List[Any]:
lowercase_ : Optional[int] = ["""rouge1""", """rouge2""", """rougeL"""]
lowercase_ : Tuple = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=UpperCAmelCase__ )
lowercase_ : Tuple = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=UpperCAmelCase__ )
assert score_sep == score_no_sep
def lowerCamelCase ( ) -> Optional[Any]:
lowercase_ : Union[str, Any] = [
"""Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.""",
"""Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .""",
]
lowercase_ : List[str] = [
"""Margot Frank, died in 1945, a month earlier than previously thought.""",
"""Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"""
""" the final seconds on board Flight 9525.""",
]
assert calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ ) == calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ )
def lowerCamelCase ( ) -> Union[str, Any]:
lowercase_ : Optional[Any] = [
"""\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" """
]
lowercase_ : List[Any] = [
""" Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."""
]
lowercase_ : Optional[int] = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , rouge_keys=["""rougeLsum"""] , newline_sep=UpperCAmelCase__ )["""rougeLsum"""]
lowercase_ : List[str] = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , rouge_keys=["""rougeLsum"""] )["""rougeLsum"""]
assert new_score > prev_score
def lowerCamelCase ( ) -> Tuple:
lowercase_ : Optional[int] = Path("""examples/seq2seq/test_data/wmt_en_ro""" )
lowercase_ : List[Any] = calculate_rouge_path(data_dir.joinpath("""test.source""" ) , data_dir.joinpath("""test.target""" ) )
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase_ : Union[str, Any] = calculate_rouge_path(
data_dir.joinpath("""test.source""" ) , data_dir.joinpath("""test.target""" ) , bootstrap_aggregation=UpperCAmelCase__ )
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
'''simple docstring'''
from collections.abc import Sequence
from queue import Queue
class __magic_name__ :
def __init__( self : Union[str, Any] , lowercase_ : Any , lowercase_ : List[Any] , lowercase_ : Tuple , lowercase_ : Dict=None , lowercase_ : Optional[Any]=None ):
lowercase_ : Union[str, Any] = start
lowercase_ : List[str] = end
lowercase_ : Tuple = val
lowercase_ : Optional[Any] = (start + end) // 2
lowercase_ : Any = left
lowercase_ : str = right
def __repr__( self : Optional[int] ):
return f'''SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})'''
class __magic_name__ :
def __init__( self : Any , lowercase_ : Sequence , lowercase_ : Optional[Any] ):
lowercase_ : str = collection
lowercase_ : List[Any] = function
if self.collection:
lowercase_ : int = self._build_tree(0 , len(lowercase_ ) - 1 )
def SCREAMING_SNAKE_CASE_ ( self : Any , lowercase_ : int , lowercase_ : Optional[int] ):
self._update_tree(self.root , lowercase_ , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : List[str] , lowercase_ : Optional[Any] ):
return self._query_range(self.root , lowercase_ , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : Dict , lowercase_ : Tuple ):
if start == end:
return SegmentTreeNode(lowercase_ , lowercase_ , self.collection[start] )
lowercase_ : Union[str, Any] = (start + end) // 2
lowercase_ : int = self._build_tree(lowercase_ , lowercase_ )
lowercase_ : int = self._build_tree(mid + 1 , lowercase_ )
return SegmentTreeNode(lowercase_ , lowercase_ , self.fn(left.val , right.val ) , lowercase_ , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : Dict , lowercase_ : List[Any] , lowercase_ : Optional[Any] ):
if node.start == i and node.end == i:
lowercase_ : List[Any] = val
return
if i <= node.mid:
self._update_tree(node.left , lowercase_ , lowercase_ )
else:
self._update_tree(node.right , lowercase_ , lowercase_ )
lowercase_ : Optional[int] = self.fn(node.left.val , node.right.val )
def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : Any , lowercase_ : Optional[Any] , lowercase_ : Dict ):
if node.start == i and node.end == j:
return node.val
if i <= node.mid:
if j <= node.mid:
# range in left child tree
return self._query_range(node.left , lowercase_ , lowercase_ )
else:
# range in left child tree and right child tree
return self.fn(
self._query_range(node.left , lowercase_ , node.mid ) , self._query_range(node.right , node.mid + 1 , lowercase_ ) , )
else:
# range in right child tree
return self._query_range(node.right , lowercase_ , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
if self.root is not None:
lowercase_ : Tuple = Queue()
queue.put(self.root )
while not queue.empty():
lowercase_ : int = queue.get()
yield node
if node.left is not None:
queue.put(node.left )
if node.right is not None:
queue.put(node.right )
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print("*" * 50)
_lowercase : Optional[int] = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7 with operator.add (3 + 4)
print(arr.query_range(2, 2)) # 5 with operator.add
print(arr.query_range(1, 3)) # 13 with operator.add (5 + 5 + 3, after update(1, 5))
print()
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : Optional[Any] = logging.get_logger(__name__)
_lowercase : Union[str, Any] = {
"facebook/s2t-small-librispeech-asr": (
"https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = '''speech_to_text'''
UpperCamelCase__ = ['''past_key_values''']
UpperCamelCase__ = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self : str , lowercase_ : Optional[int]=10000 , lowercase_ : int=12 , lowercase_ : Any=2048 , lowercase_ : Any=4 , lowercase_ : Dict=6 , lowercase_ : Any=2048 , lowercase_ : List[str]=4 , lowercase_ : str=0.0 , lowercase_ : str=0.0 , lowercase_ : Union[str, Any]=True , lowercase_ : List[Any]=True , lowercase_ : int="relu" , lowercase_ : str=256 , lowercase_ : int=0.1 , lowercase_ : int=0.0 , lowercase_ : str=0.0 , lowercase_ : Optional[int]=0.02 , lowercase_ : str=2 , lowercase_ : Union[str, Any]=True , lowercase_ : Any=1 , lowercase_ : Dict=0 , lowercase_ : List[str]=2 , lowercase_ : List[Any]=6000 , lowercase_ : Tuple=1024 , lowercase_ : str=2 , lowercase_ : Any=(5, 5) , lowercase_ : Union[str, Any]=1024 , lowercase_ : Dict=80 , lowercase_ : List[Any]=1 , **lowercase_ : int , ):
lowercase_ : List[Any] = vocab_size
lowercase_ : str = d_model
lowercase_ : List[Any] = encoder_ffn_dim
lowercase_ : str = encoder_layers
lowercase_ : Dict = encoder_attention_heads
lowercase_ : str = decoder_ffn_dim
lowercase_ : int = decoder_layers
lowercase_ : Any = decoder_attention_heads
lowercase_ : Any = dropout
lowercase_ : Dict = attention_dropout
lowercase_ : Optional[int] = activation_dropout
lowercase_ : Any = activation_function
lowercase_ : Union[str, Any] = init_std
lowercase_ : str = encoder_layerdrop
lowercase_ : Optional[int] = decoder_layerdrop
lowercase_ : Dict = use_cache
lowercase_ : Union[str, Any] = encoder_layers
lowercase_ : Tuple = scale_embedding # scale factor will be sqrt(d_model) if True
lowercase_ : Dict = max_source_positions
lowercase_ : Optional[int] = max_target_positions
lowercase_ : Tuple = num_conv_layers
lowercase_ : Tuple = list(lowercase_ )
lowercase_ : Union[str, Any] = conv_channels
lowercase_ : str = input_feat_per_channel
lowercase_ : str = input_channels
if len(self.conv_kernel_sizes ) != self.num_conv_layers:
raise ValueError(
"""Configuration for convolutional module is incorrect. """
"""It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` """
f'''but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, '''
f'''`config.num_conv_layers = {self.num_conv_layers}`.''' )
super().__init__(
pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , is_encoder_decoder=lowercase_ , decoder_start_token_id=lowercase_ , **lowercase_ , )
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
_lowercase : int = logging.get_logger(__name__)
_lowercase : Optional[Any] = {
"EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = '''gptj'''
UpperCamelCase__ = {
'''max_position_embeddings''': '''n_positions''',
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self : int , lowercase_ : str=50400 , lowercase_ : Any=2048 , lowercase_ : Any=4096 , lowercase_ : int=28 , lowercase_ : List[str]=16 , lowercase_ : Optional[Any]=64 , lowercase_ : Union[str, Any]=None , lowercase_ : Dict="gelu_new" , lowercase_ : int=0.0 , lowercase_ : Union[str, Any]=0.0 , lowercase_ : int=0.0 , lowercase_ : Any=1E-5 , lowercase_ : Optional[int]=0.02 , lowercase_ : str=True , lowercase_ : str=50256 , lowercase_ : List[str]=50256 , lowercase_ : str=False , **lowercase_ : str , ):
lowercase_ : Optional[Any] = vocab_size
lowercase_ : Tuple = n_positions
lowercase_ : str = n_embd
lowercase_ : Tuple = n_layer
lowercase_ : Dict = n_head
lowercase_ : Union[str, Any] = n_inner
lowercase_ : Optional[Any] = rotary_dim
lowercase_ : int = activation_function
lowercase_ : int = resid_pdrop
lowercase_ : str = embd_pdrop
lowercase_ : Union[str, Any] = attn_pdrop
lowercase_ : Tuple = layer_norm_epsilon
lowercase_ : str = initializer_range
lowercase_ : Union[str, Any] = use_cache
lowercase_ : Optional[int] = bos_token_id
lowercase_ : Any = eos_token_id
super().__init__(
bos_token_id=lowercase_ , eos_token_id=lowercase_ , tie_word_embeddings=lowercase_ , **lowercase_ )
class __magic_name__ ( _UpperCAmelCase):
def __init__( self : Union[str, Any] , lowercase_ : PretrainedConfig , lowercase_ : str = "default" , lowercase_ : List[PatchingSpec] = None , lowercase_ : bool = False , ):
super().__init__(lowercase_ , task=lowercase_ , patching_specs=lowercase_ , use_past=lowercase_ )
if not getattr(self._config , """pad_token_id""" , lowercase_ ):
# TODO: how to do that better?
lowercase_ : Optional[int] = 0
@property
def SCREAMING_SNAKE_CASE_ ( self : int ):
lowercase_ : List[str] = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
if self.use_past:
self.fill_with_past_key_values_(lowercase_ , direction="""inputs""" )
lowercase_ : int = {0: """batch""", 1: """past_sequence + sequence"""}
else:
lowercase_ : List[Any] = {0: """batch""", 1: """sequence"""}
return common_inputs
@property
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
return self._config.n_layer
@property
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
return self._config.n_head
def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : PreTrainedTokenizer , lowercase_ : int = -1 , lowercase_ : int = -1 , lowercase_ : bool = False , lowercase_ : Optional[TensorType] = None , ):
lowercase_ : Tuple = super(lowercase_ , self ).generate_dummy_inputs(
lowercase_ , batch_size=lowercase_ , seq_length=lowercase_ , is_pair=lowercase_ , framework=lowercase_ )
# We need to order the input in the way they appears in the forward()
lowercase_ : Optional[Any] = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
lowercase_ , lowercase_ : List[Any] = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
lowercase_ : Tuple = seqlen + 2
lowercase_ : Any = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
lowercase_ : List[Any] = [
(torch.zeros(lowercase_ ), torch.zeros(lowercase_ )) for _ in range(self.num_layers )
]
lowercase_ : Any = common_inputs["""attention_mask"""]
if self.use_past:
lowercase_ : List[Any] = ordered_inputs["""attention_mask"""].dtype
lowercase_ : Union[str, Any] = torch.cat(
[ordered_inputs["""attention_mask"""], torch.ones(lowercase_ , lowercase_ , dtype=lowercase_ )] , dim=1 )
return ordered_inputs
@property
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
return 13
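# A small, self-contained sketch of the dummy past_key_values layout that
# generate_dummy_inputs above builds when use_past is enabled (the concrete sizes
# here are illustrative assumptions, not values from a real GPT-J checkpoint).
if __name__ == "__main__" and is_torch_available():
    import torch
    batch, seqlen, n_layer, n_head, head_dim = 2, 3, 4, 8, 32
    past_len = seqlen + 2  # the dummy cache is deliberately longer than the new tokens
    past_shape = (batch, n_head, past_len, head_dim)
    past_key_values = [(torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(n_layer)]
    attention_mask = torch.ones(batch, seqlen, dtype=torch.int64)
    # the attention mask must also cover the cached positions
    attention_mask = torch.cat([attention_mask, torch.ones(batch, past_len, dtype=attention_mask.dtype)], dim=1)
    print(len(past_key_values), past_key_values[0][0].shape, attention_mask.shape)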
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class __magic_name__ :
def __init__( self : int , lowercase_ : Optional[Any] , lowercase_ : List[Any]=13 , lowercase_ : List[str]=10 , lowercase_ : Union[str, Any]=3 , lowercase_ : str=2 , lowercase_ : Optional[Any]=2 , lowercase_ : int=True , lowercase_ : List[Any]=True , lowercase_ : Union[str, Any]=32 , lowercase_ : Union[str, Any]=5 , lowercase_ : str=4 , lowercase_ : Dict=37 , lowercase_ : Tuple="gelu" , lowercase_ : int=0.1 , lowercase_ : Optional[Any]=0.1 , lowercase_ : Any=10 , lowercase_ : Tuple=0.02 , lowercase_ : Any="divided_space_time" , lowercase_ : Tuple=None , ):
lowercase_ : int = parent
lowercase_ : str = batch_size
lowercase_ : List[str] = image_size
lowercase_ : str = num_channels
lowercase_ : List[Any] = patch_size
lowercase_ : Optional[Any] = num_frames
lowercase_ : Dict = is_training
lowercase_ : int = use_labels
lowercase_ : List[str] = hidden_size
lowercase_ : Dict = num_hidden_layers
lowercase_ : Dict = num_attention_heads
lowercase_ : Any = intermediate_size
lowercase_ : Optional[int] = hidden_act
lowercase_ : Optional[Any] = hidden_dropout_prob
lowercase_ : List[Any] = attention_probs_dropout_prob
lowercase_ : Any = attention_type
lowercase_ : Union[str, Any] = initializer_range
lowercase_ : List[str] = scope
lowercase_ : Optional[int] = num_labels
# in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
lowercase_ : Dict = (image_size // patch_size) ** 2
lowercase_ : List[Any] = (num_frames) * self.num_patches_per_frame + 1
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
lowercase_ : Optional[Any] = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
lowercase_ : int = None
if self.use_labels:
lowercase_ : Optional[int] = ids_tensor([self.batch_size] , self.num_labels )
lowercase_ : Optional[Any] = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
lowercase_ : int = TimesformerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
lowercase_ : Any = self.num_labels
return config
def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : List[Any] , lowercase_ : Dict , lowercase_ : List[str] ):
lowercase_ : Optional[Any] = TimesformerModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ : int = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Optional[int] , lowercase_ : List[str] , lowercase_ : str ):
lowercase_ : Dict = TimesformerForVideoClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ : int = model(lowercase_ )
# verify the logits shape
lowercase_ : List[Any] = torch.Size((self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
lowercase_ : List[str] = self.prepare_config_and_inputs()
lowercase_ , lowercase_ , lowercase_ : int = config_and_inputs
lowercase_ : List[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __magic_name__ ( _UpperCAmelCase, _UpperCAmelCase, unittest.TestCase):
UpperCamelCase__ = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
UpperCamelCase__ = (
{'''feature-extraction''': TimesformerModel, '''video-classification''': TimesformerForVideoClassification}
if is_torch_available()
else {}
)
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
lowercase_ : Any = TimesformerModelTester(self )
lowercase_ : Union[str, Any] = ConfigTester(
self , config_class=lowercase_ , has_text_modality=lowercase_ , hidden_size=37 )
def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : Any , lowercase_ : List[str] , lowercase_ : Tuple=False ):
lowercase_ : List[Any] = copy.deepcopy(lowercase_ )
if return_labels:
if model_class in get_values(lowercase_ ):
lowercase_ : List[str] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase_ )
return inputs_dict
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""TimeSformer does not use inputs_embeds""" )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
pass
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
lowercase_ , lowercase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : str = model_class(lowercase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase_ : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase_ , nn.Linear ) )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ , lowercase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : Dict = model_class(lowercase_ )
lowercase_ : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ : Union[str, Any] = [*signature.parameters.keys()]
lowercase_ : str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : str ):
lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : str ):
lowercase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*lowercase_ )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Any ):
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ : Any = TimesformerModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : str ):
if not self.has_attentions:
pass
else:
lowercase_ , lowercase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ : List[str] = True
for model_class in self.all_model_classes:
lowercase_ : str = self.model_tester.seq_length
lowercase_ : int = self.model_tester.num_frames
lowercase_ : int = True
lowercase_ : Any = False
lowercase_ : str = True
lowercase_ : int = model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
with torch.no_grad():
lowercase_ : List[Any] = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
lowercase_ : List[str] = outputs.attentions
self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowercase_ : List[str] = True
lowercase_ : str = model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
with torch.no_grad():
lowercase_ : Dict = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
lowercase_ : int = outputs.attentions
self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
lowercase_ : Optional[Any] = len(lowercase_ )
# Check attention is always last and order is fine
lowercase_ : Tuple = True
lowercase_ : Dict = True
lowercase_ : str = model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
with torch.no_grad():
lowercase_ : str = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
self.assertEqual(out_len + 1 , len(lowercase_ ) )
lowercase_ : Optional[Any] = outputs.attentions
self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
def check_hidden_states_output(lowercase_ : Union[str, Any] , lowercase_ : Optional[int] , lowercase_ : Dict ):
lowercase_ : List[str] = model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
with torch.no_grad():
lowercase_ : Optional[Any] = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
lowercase_ : Dict = outputs.hidden_states
lowercase_ : List[Any] = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(lowercase_ ) , lowercase_ )
lowercase_ : List[Any] = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
lowercase_ , lowercase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : List[str] = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase_ : Optional[int] = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
def lowerCamelCase ( ) -> Optional[int]:
lowercase_ : List[str] = hf_hub_download(
repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" )
lowercase_ : List[Any] = np.load(UpperCAmelCase__ )
return list(UpperCAmelCase__ )
@require_torch
@require_vision
class __magic_name__ ( unittest.TestCase):
@cached_property
def SCREAMING_SNAKE_CASE_ ( self : str ):
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
lowercase_ : Any = TimesformerForVideoClassification.from_pretrained("""facebook/timesformer-base-finetuned-k400""" ).to(
lowercase_ )
lowercase_ : Optional[Any] = self.default_image_processor
lowercase_ : Any = prepare_video()
lowercase_ : Optional[int] = image_processor(video[:8] , return_tensors="""pt""" ).to(lowercase_ )
# forward pass
with torch.no_grad():
lowercase_ : Optional[Any] = model(**lowercase_ )
# verify the logits
lowercase_ : Any = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape , lowercase_ )
lowercase_ : int = torch.tensor([-0.30_16, -0.77_13, -0.42_05] ).to(lowercase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase_ , atol=1E-4 ) )
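# Quick arithmetic check of the token count the tests above rely on, using the
# model tester's defaults (image_size=10, patch_size=2, num_frames=2); plain
# Python, no model needed.
if __name__ == "__main__":
    image_size, patch_size, num_frames = 10, 2, 2
    num_patches_per_frame = (image_size // patch_size) ** 2  # 25 patches per frame
    seq_length = num_frames * num_patches_per_frame + 1  # 51 tokens, including the CLS token
    print(num_patches_per_frame, seq_length)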
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
_lowercase : List[Any] = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def lowerCamelCase ( UpperCAmelCase__ : List[str] , UpperCAmelCase__ : tuple , UpperCAmelCase__ : Path , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[str]=False , ) -> List[str]:
output_path.parent.mkdir(parents=UpperCAmelCase__ , exist_ok=UpperCAmelCase__ )
# PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
# so we check the torch version for backwards compatibility
if is_torch_less_than_1_11:
export(
UpperCAmelCase__ , UpperCAmelCase__ , f=output_path.as_posix() , input_names=UpperCAmelCase__ , output_names=UpperCAmelCase__ , dynamic_axes=UpperCAmelCase__ , do_constant_folding=UpperCAmelCase__ , use_external_data_format=UpperCAmelCase__ , enable_onnx_checker=UpperCAmelCase__ , opset_version=UpperCAmelCase__ , )
else:
export(
UpperCAmelCase__ , UpperCAmelCase__ , f=output_path.as_posix() , input_names=UpperCAmelCase__ , output_names=UpperCAmelCase__ , dynamic_axes=UpperCAmelCase__ , do_constant_folding=UpperCAmelCase__ , opset_version=UpperCAmelCase__ , )
@torch.no_grad()
def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : str , UpperCAmelCase__ : int , UpperCAmelCase__ : bool = False ) -> List[Any]:
lowercase_ : Dict = torch.floataa if fpaa else torch.floataa
if fpaa and torch.cuda.is_available():
lowercase_ : Tuple = """cuda"""
elif fpaa and not torch.cuda.is_available():
raise ValueError("""`float16` model export is only supported on GPUs with CUDA""" )
else:
lowercase_ : Union[str, Any] = """cpu"""
lowercase_ : List[str] = Path(UpperCAmelCase__ )
# VAE DECODER
lowercase_ : Dict = AutoencoderKL.from_pretrained(model_path + """/vae""" )
lowercase_ : Any = vae_decoder.config.latent_channels
# forward only through the decoder part
lowercase_ : Optional[int] = vae_decoder.decode
onnx_export(
UpperCAmelCase__ , model_args=(
torch.randn(1 , UpperCAmelCase__ , 25 , 25 ).to(device=UpperCAmelCase__ , dtype=UpperCAmelCase__ ),
False,
) , output_path=output_path / """vae_decoder""" / """model.onnx""" , ordered_input_names=["""latent_sample""", """return_dict"""] , output_names=["""sample"""] , dynamic_axes={
"""latent_sample""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""},
} , opset=UpperCAmelCase__ , )
del vae_decoder
if __name__ == "__main__":
_lowercase : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"--model_path",
type=str,
required=True,
help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
)
parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--opset",
default=14,
type=int,
help="The version of the ONNX operator set to use.",
)
parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
_lowercase : Dict = parser.parse_args()
print(args.output_path)
convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
print("SD: Done: ONNX")
'''simple docstring'''
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
_lowercase : Tuple = logging.get_logger(__name__)
# General docstring
_lowercase : List[str] = "RegNetConfig"
# Base docstring
_lowercase : Dict = "facebook/regnet-y-040"
_lowercase : Union[str, Any] = [1, 1088, 7, 7]
# Image classification docstring
_lowercase : Optional[Any] = "facebook/regnet-y-040"
_lowercase : Union[str, Any] = "tabby, tabby cat"
_lowercase : str = [
"facebook/regnet-y-040",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class __magic_name__ ( nn.Module):
def __init__( self : Union[str, Any] , lowercase_ : int , lowercase_ : int , lowercase_ : int = 3 , lowercase_ : int = 1 , lowercase_ : int = 1 , lowercase_ : Optional[str] = "relu" , ):
super().__init__()
lowercase_ : List[Any] = nn.Convad(
lowercase_ , lowercase_ , kernel_size=lowercase_ , stride=lowercase_ , padding=kernel_size // 2 , groups=lowercase_ , bias=lowercase_ , )
lowercase_ : str = nn.BatchNormad(lowercase_ )
lowercase_ : Optional[int] = ACTaFN[activation] if activation is not None else nn.Identity()
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : List[str] ):
lowercase_ : Dict = self.convolution(lowercase_ )
lowercase_ : str = self.normalization(lowercase_ )
lowercase_ : Optional[Any] = self.activation(lowercase_ )
return hidden_state
class __magic_name__ ( nn.Module):
def __init__( self : List[Any] , lowercase_ : RegNetConfig ):
super().__init__()
lowercase_ : str = RegNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act )
lowercase_ : Any = config.num_channels
def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : Optional[Any] ):
lowercase_ : List[str] = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
"""Make sure that the channel dimension of the pixel values match with the one set in the configuration.""" )
lowercase_ : Any = self.embedder(lowercase_ )
return hidden_state
class __magic_name__ ( nn.Module):
def __init__( self : Optional[int] , lowercase_ : int , lowercase_ : int , lowercase_ : int = 2 ):
super().__init__()
lowercase_ : Optional[Any] = nn.Convad(lowercase_ , lowercase_ , kernel_size=1 , stride=lowercase_ , bias=lowercase_ )
lowercase_ : Union[str, Any] = nn.BatchNormad(lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : Tensor ):
lowercase_ : Tuple = self.convolution(lowercase_ )
lowercase_ : str = self.normalization(lowercase_ )
return hidden_state
class __magic_name__ ( nn.Module):
def __init__( self : str , lowercase_ : int , lowercase_ : int ):
super().__init__()
lowercase_ : int = nn.AdaptiveAvgPoolad((1, 1) )
lowercase_ : int = nn.Sequential(
nn.Convad(lowercase_ , lowercase_ , kernel_size=1 ) , nn.ReLU() , nn.Convad(lowercase_ , lowercase_ , kernel_size=1 ) , nn.Sigmoid() , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : Any ):
# b c h w -> b c 1 1
lowercase_ : List[str] = self.pooler(lowercase_ )
lowercase_ : Optional[int] = self.attention(lowercase_ )
lowercase_ : Any = hidden_state * attention
return hidden_state
class __magic_name__ ( nn.Module):
def __init__( self : Optional[int] , lowercase_ : RegNetConfig , lowercase_ : int , lowercase_ : int , lowercase_ : int = 1 ):
super().__init__()
lowercase_ : List[Any] = in_channels != out_channels or stride != 1
lowercase_ : Optional[int] = max(1 , out_channels // config.groups_width )
lowercase_ : Dict = (
RegNetShortCut(lowercase_ , lowercase_ , stride=lowercase_ ) if should_apply_shortcut else nn.Identity()
)
lowercase_ : List[Any] = nn.Sequential(
RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(lowercase_ , lowercase_ , stride=lowercase_ , groups=lowercase_ , activation=config.hidden_act ) , RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=lowercase_ ) , )
lowercase_ : int = ACTaFN[config.hidden_act]
def SCREAMING_SNAKE_CASE_ ( self : Any , lowercase_ : Any ):
lowercase_ : Any = hidden_state
lowercase_ : Union[str, Any] = self.layer(lowercase_ )
lowercase_ : Union[str, Any] = self.shortcut(lowercase_ )
hidden_state += residual
lowercase_ : str = self.activation(lowercase_ )
return hidden_state
class __magic_name__ ( nn.Module):
def __init__( self : Optional[Any] , lowercase_ : RegNetConfig , lowercase_ : int , lowercase_ : int , lowercase_ : int = 1 ):
super().__init__()
lowercase_ : str = in_channels != out_channels or stride != 1
lowercase_ : int = max(1 , out_channels // config.groups_width )
lowercase_ : int = (
RegNetShortCut(lowercase_ , lowercase_ , stride=lowercase_ ) if should_apply_shortcut else nn.Identity()
)
lowercase_ : Union[str, Any] = nn.Sequential(
RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(lowercase_ , lowercase_ , stride=lowercase_ , groups=lowercase_ , activation=config.hidden_act ) , RegNetSELayer(lowercase_ , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=lowercase_ ) , )
lowercase_ : Optional[int] = ACTaFN[config.hidden_act]
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Union[str, Any] ):
lowercase_ : Optional[int] = hidden_state
lowercase_ : str = self.layer(lowercase_ )
lowercase_ : int = self.shortcut(lowercase_ )
hidden_state += residual
lowercase_ : Optional[Any] = self.activation(lowercase_ )
return hidden_state
class __magic_name__ ( nn.Module):
def __init__( self : str , lowercase_ : RegNetConfig , lowercase_ : int , lowercase_ : int , lowercase_ : int = 2 , lowercase_ : int = 2 , ):
super().__init__()
lowercase_ : str = RegNetXLayer if config.layer_type == """x""" else RegNetYLayer
lowercase_ : str = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(
lowercase_ , lowercase_ , lowercase_ , stride=lowercase_ , ) , *[layer(lowercase_ , lowercase_ , lowercase_ ) for _ in range(depth - 1 )] , )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : List[str] ):
lowercase_ : Tuple = self.layers(lowercase_ )
return hidden_state
class __magic_name__ ( nn.Module):
def __init__( self : Dict , lowercase_ : RegNetConfig ):
super().__init__()
lowercase_ : Optional[Any] = nn.ModuleList([] )
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
RegNetStage(
lowercase_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
lowercase_ : Optional[Any] = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(lowercase_ , config.depths[1:] ):
self.stages.append(RegNetStage(lowercase_ , lowercase_ , lowercase_ , depth=lowercase_ ) )
def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : Tensor , lowercase_ : bool = False , lowercase_ : bool = True ):
lowercase_ : Tuple = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
lowercase_ : Union[str, Any] = hidden_states + (hidden_state,)
lowercase_ : Dict = stage_module(lowercase_ )
if output_hidden_states:
lowercase_ : Optional[Any] = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=lowercase_ , hidden_states=lowercase_ )
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = RegNetConfig
UpperCamelCase__ = '''regnet'''
UpperCamelCase__ = '''pixel_values'''
UpperCamelCase__ = True
def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : Optional[Any] ):
if isinstance(lowercase_ , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode="""fan_out""" , nonlinearity="""relu""" )
elif isinstance(lowercase_ , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : Optional[int] , lowercase_ : Any=False ):
if isinstance(lowercase_ , lowercase_ ):
lowercase_ : List[str] = value
_lowercase : Dict = r"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
_lowercase : Any = r"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
'''The bare RegNet model outputting raw features without any specific head on top.''', _UpperCAmelCase, )
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class __magic_name__ ( _UpperCAmelCase):
def __init__( self : Any , lowercase_ : Any ):
super().__init__(lowercase_ )
lowercase_ : List[str] = config
lowercase_ : Union[str, Any] = RegNetEmbeddings(lowercase_ )
lowercase_ : Union[str, Any] = RegNetEncoder(lowercase_ )
lowercase_ : str = nn.AdaptiveAvgPool2d((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowercase_ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowercase_ , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : Tensor , lowercase_ : Optional[bool] = None , lowercase_ : Optional[bool] = None ):
lowercase_ : List[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase_ : Tuple = return_dict if return_dict is not None else self.config.use_return_dict
lowercase_ : str = self.embedder(lowercase_ )
lowercase_ : Optional[Any] = self.encoder(
lowercase_ , output_hidden_states=lowercase_ , return_dict=lowercase_ )
lowercase_ : List[Any] = encoder_outputs[0]
lowercase_ : str = self.pooler(lowercase_ )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowercase_ , pooler_output=lowercase_ , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
'''
RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
''', _UpperCAmelCase, )
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class __magic_name__ ( _UpperCAmelCase):
def __init__( self : Dict , lowercase_ : str ):
super().__init__(lowercase_ )
lowercase_ : Any = config.num_labels
lowercase_ : List[str] = RegNetModel(lowercase_ )
# classification head
lowercase_ : Any = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowercase_ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowercase_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Optional[torch.FloatTensor] = None , lowercase_ : Optional[torch.LongTensor] = None , lowercase_ : Optional[bool] = None , lowercase_ : Optional[bool] = None , ):
lowercase_ : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict
lowercase_ : Optional[int] = self.regnet(lowercase_ , output_hidden_states=lowercase_ , return_dict=lowercase_ )
lowercase_ : Optional[int] = outputs.pooler_output if return_dict else outputs[1]
lowercase_ : List[Any] = self.classifier(lowercase_ )
lowercase_ : Optional[int] = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
lowercase_ : Optional[int] = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
lowercase_ : str = """single_label_classification"""
else:
lowercase_ : str = """multi_label_classification"""
if self.config.problem_type == "regression":
lowercase_ : str = MSELoss()
if self.num_labels == 1:
lowercase_ : List[Any] = loss_fct(logits.squeeze() , labels.squeeze() )
else:
lowercase_ : List[str] = loss_fct(lowercase_ , lowercase_ )
elif self.config.problem_type == "single_label_classification":
lowercase_ : Optional[int] = CrossEntropyLoss()
lowercase_ : Union[str, Any] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
lowercase_ : Dict = BCEWithLogitsLoss()
lowercase_ : Tuple = loss_fct(lowercase_ , lowercase_ )
if not return_dict:
lowercase_ : Tuple = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=lowercase_ , logits=lowercase_ , hidden_states=outputs.hidden_states )
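# A hedged usage sketch for the classification model defined above. The checkpoint name and the
# `image` variable are illustrative assumptions, not taken from this file:
#
#   from transformers import AutoImageProcessor, RegNetForImageClassification
#   import torch
#
#   processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#   model = RegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#   inputs = processor(images=image, return_tensors="pt")  # `image` is a PIL.Image assumed in scope
#   with torch.no_grad():
#       logits = model(**inputs).logits
#   predicted_class = model.config.id2label[logits.argmax(-1).item()]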
'''simple docstring'''
import argparse
_lowercase : Optional[int] = "docs/source/_static/js/custom.js"
def lowerCamelCase ( UpperCAmelCase__ : Tuple ) -> Dict:
with open(UpperCAmelCase__ , encoding="""utf-8""" , newline="""\n""" ) as f:
lowercase_ : Optional[int] = f.readlines()
lowercase_ : Tuple = 0
# First let's put the right version
while not lines[index].startswith("""const stableVersion =""" ):
index += 1
lowercase_ : Optional[Any] = F'''const stableVersion = "v{version}"\n'''
# Then update the dictionary
while not lines[index].startswith("""const versionMapping = {""" ):
index += 1
# We go until the end
while not lines[index].startswith("""}""" ):
index += 1
# We add the new version at the end
lines[index - 1] += F''' "v{version}": "v{version}",\n'''
with open(UpperCAmelCase__ , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(UpperCAmelCase__ )
if __name__ == "__main__":
_lowercase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("--version", help="Release version.")
_lowercase : Dict = parser.parse_args()
update_custom_js(args.version)
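# Hedged usage note: this utility is meant to be run at release time; the script filename and
# version string below are illustrative only:
#
#   python update_custom_js.py --version v4.30.0
#
# It rewrites the `const stableVersion = ...` line and appends the new entry to the
# `versionMapping` dictionary inside docs/source/_static/js/custom.js.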
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowercase : List[Any] = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : int = [
"FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FocalNetForImageClassification",
"FocalNetForMaskedImageModeling",
"FocalNetBackbone",
"FocalNetModel",
"FocalNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
_lowercase : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class __magic_name__ ( unittest.TestCase):
def __init__( self : Any , lowercase_ : Optional[Any] , lowercase_ : int=7 , lowercase_ : Any=3 , lowercase_ : Tuple=18 , lowercase_ : List[Any]=30 , lowercase_ : Union[str, Any]=400 , lowercase_ : Tuple=True , lowercase_ : int=None , lowercase_ : List[Any]=True , ):
lowercase_ : List[str] = size if size is not None else {"""height""": 18, """width""": 18}
lowercase_ : Any = parent
lowercase_ : Union[str, Any] = batch_size
lowercase_ : Tuple = num_channels
lowercase_ : int = image_size
lowercase_ : Optional[Any] = min_resolution
lowercase_ : int = max_resolution
lowercase_ : List[Any] = do_resize
lowercase_ : Tuple = size
lowercase_ : Union[str, Any] = do_normalize
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.88_66_44_36_34_03_32_03, 0.66_18_82_93_69_54_49_83, 0.38_91_74_64_01_78_68_04],
[-0.60_42_55_91_46_88_11_04, -0.0_22_95_00_88_60_52_84_69, 0.54_23_79_73_69_00_32_96],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class __magic_name__ ( _UpperCAmelCase, unittest.TestCase):
UpperCamelCase__ = ImageGPTImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE_ ( self : int ):
lowercase_ : List[str] = ImageGPTImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE_ ( self : str ):
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
lowercase_ : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase_ , """clusters""" ) )
self.assertTrue(hasattr(lowercase_ , """do_resize""" ) )
self.assertTrue(hasattr(lowercase_ , """size""" ) )
self.assertTrue(hasattr(lowercase_ , """do_normalize""" ) )
def SCREAMING_SNAKE_CASE_ ( self : int ):
lowercase_ : int = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
lowercase_ : str = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
lowercase_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
lowercase_ : str = json.loads(image_processor.to_json_string() )
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowercase_ , obj[key] ) )
else:
self.assertEqual(obj[key] , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
lowercase_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase_ : Optional[Any] = os.path.join(lowercase_ , """image_processor.json""" )
image_processor_first.to_json_file(lowercase_ )
lowercase_ : Optional[int] = self.image_processing_class.from_json_file(lowercase_ ).to_dict()
lowercase_ : List[str] = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowercase_ , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
lowercase_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(lowercase_ )
lowercase_ : Union[str, Any] = self.image_processing_class.from_pretrained(lowercase_ ).to_dict()
lowercase_ : Any = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowercase_ , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , lowercase_ )
@unittest.skip("""ImageGPT requires clusters at initialization""" )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
pass
def lowerCamelCase ( ) -> int:
lowercase_ : int = load_dataset("""hf-internal-testing/fixtures_image_utils""" , split="""test""" )
lowercase_ : Union[str, Any] = Image.open(dataset[4]["""file"""] )
lowercase_ : Optional[Any] = Image.open(dataset[5]["""file"""] )
lowercase_ : Optional[Any] = [imagea, imagea]
return images
@require_vision
@require_torch
class __magic_name__ ( unittest.TestCase):
@slow
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
lowercase_ : Union[str, Any] = ImageGPTImageProcessor.from_pretrained("""openai/imagegpt-small""" )
lowercase_ : Tuple = prepare_images()
# test non-batched
lowercase_ : Optional[Any] = image_processing(images[0] , return_tensors="""pt""" )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (1, 1024) )
lowercase_ : Union[str, Any] = [306, 191, 191]
self.assertEqual(encoding.input_ids[0, :3].tolist() , lowercase_ )
# test batched
lowercase_ : int = image_processing(lowercase_ , return_tensors="""pt""" )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (2, 1024) )
lowercase_ : Dict = [303, 13, 13]
self.assertEqual(encoding.input_ids[1, -3:].tolist() , lowercase_ )
'''simple docstring'''
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize("""dataset_size""" , [None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize("""input_in_memory_max_size""" , ["""default""", 0, 100 * 2**20, 900 * 2**20] )
def lowerCamelCase ( UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str ) -> Any:
if input_in_memory_max_size != "default":
monkeypatch.setattr(datasets.config , """IN_MEMORY_MAX_SIZE""" , UpperCAmelCase__ )
lowercase_ : List[Any] = datasets.config.IN_MEMORY_MAX_SIZE
if input_in_memory_max_size == "default":
assert in_memory_max_size == 0
else:
assert in_memory_max_size == input_in_memory_max_size
if dataset_size and in_memory_max_size:
lowercase_ : str = dataset_size < in_memory_max_size
else:
lowercase_ : List[Any] = False
lowercase_ : Any = is_small_dataset(UpperCAmelCase__ )
assert result == expected
'''simple docstring'''
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
"""files""" , [
["""full:README.md""", """dataset_infos.json"""],
["""empty:README.md""", """dataset_infos.json"""],
["""dataset_infos.json"""],
["""full:README.md"""],
] , )
def lowerCamelCase ( UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Tuple ) -> List[str]:
lowercase_ : List[Any] = tmp_path_factory.mktemp("""dset_infos_dir""" )
if "full:README.md" in files:
with open(dataset_infos_dir / """README.md""" , """w""" ) as f:
f.write("""---\ndataset_info:\n dataset_size: 42\n---""" )
if "empty:README.md" in files:
with open(dataset_infos_dir / """README.md""" , """w""" ) as f:
f.write("""""" )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / """dataset_infos.json""" , """w""" ) as f:
f.write("""{\"default\": {\"dataset_size\": 42}}""" )
lowercase_ : List[Any] = DatasetInfosDict.from_directory(UpperCAmelCase__ )
assert dataset_infos
assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
"""dataset_info""" , [
DatasetInfo(),
DatasetInfo(
description="""foo""" , features=Features({"""a""": Value("""int32""" )} ) , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train"""}] , download_size=42 , ),
] , )
def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : DatasetInfo ) -> Union[str, Any]:
lowercase_ : Any = str(UpperCAmelCase__ )
dataset_info.write_to_directory(UpperCAmelCase__ )
lowercase_ : Tuple = DatasetInfo.from_directory(UpperCAmelCase__ )
assert dataset_info == reloaded
assert os.path.exists(os.path.join(UpperCAmelCase__ , """dataset_info.json""" ) )
def lowerCamelCase ( ) -> Any:
lowercase_ : List[Any] = DatasetInfo(
description="""foo""" , citation="""bar""" , homepage="""https://foo.bar""" , license="""CC0""" , features=Features({"""a""": Value("""int32""" )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train""", """num_examples""": 42}] , download_checksums={} , download_size=1337 , post_processing_size=442 , dataset_size=1234 , size_in_bytes=1337 + 442 + 1234 , )
lowercase_ : str = dataset_info._to_yaml_dict()
assert sorted(UpperCAmelCase__ ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
assert key in dataset_info_yaml_dict
assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
lowercase_ : List[Any] = yaml.safe_dump(UpperCAmelCase__ )
lowercase_ : List[str] = yaml.safe_load(UpperCAmelCase__ )
assert dataset_info_yaml_dict == reloaded
def lowerCamelCase ( ) -> Any:
lowercase_ : Optional[Any] = DatasetInfo()
lowercase_ : Optional[int] = dataset_info._to_yaml_dict()
assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
"""dataset_infos_dict""" , [
DatasetInfosDict(),
DatasetInfosDict({"""default""": DatasetInfo()} ),
DatasetInfosDict({"""my_config_name""": DatasetInfo()} ),
DatasetInfosDict(
{
"""default""": DatasetInfo(
description="""foo""" , features=Features({"""a""": Value("""int32""" )} ) , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train"""}] , download_size=42 , )
} ),
DatasetInfosDict(
{
"""v1""": DatasetInfo(dataset_size=42 ),
"""v2""": DatasetInfo(dataset_size=1337 ),
} ),
] , )
def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : DatasetInfosDict ) -> Union[str, Any]:
lowercase_ : Optional[int] = str(UpperCAmelCase__ )
dataset_infos_dict.write_to_directory(UpperCAmelCase__ )
lowercase_ : List[str] = DatasetInfosDict.from_directory(UpperCAmelCase__ )
# the config_name of the dataset_infos_dict take over the attribute
for config_name, dataset_info in dataset_infos_dict.items():
lowercase_ : Union[str, Any] = config_name
# the yaml representation doesn't include fields like description or citation
# so we just test that we can recover what we can from the yaml
lowercase_ : Dict = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
assert dataset_infos_dict == reloaded
if dataset_infos_dict:
assert os.path.exists(os.path.join(UpperCAmelCase__ , """README.md""" ) )
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class __magic_name__ ( unittest.TestCase):
@slow
def SCREAMING_SNAKE_CASE_ ( self : str ):
lowercase_ : Any = TFAutoModelForSeq2SeqLM.from_pretrained("""google/mt5-small""" )
lowercase_ : Dict = AutoTokenizer.from_pretrained("""google/mt5-small""" )
lowercase_ : Union[str, Any] = tokenizer("""Hello there""" , return_tensors="""tf""" ).input_ids
lowercase_ : List[str] = tokenizer("""Hi I am""" , return_tensors="""tf""" ).input_ids
lowercase_ : Optional[Any] = model(lowercase_ , labels=lowercase_ ).loss
lowercase_ : Optional[int] = -tf.math.reduce_mean(lowercase_ ).numpy()
lowercase_ : Optional[int] = -21.22_81_68
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2E-4 )
'''simple docstring'''
import math
import sys
def lowerCamelCase ( UpperCAmelCase__ : str ) -> str:
lowercase_ : Dict = """"""
try:
with open(UpperCAmelCase__ , """rb""" ) as binary_file:
lowercase_ : List[Any] = binary_file.read()
for dat in data:
lowercase_ : Dict = F'''{dat:08b}'''
result += curr_byte
return result
except OSError:
print("""File not accessible""" )
sys.exit()
def lowerCamelCase ( UpperCAmelCase__ : str ) -> str:
lowercase_ : Optional[Any] = {"""0""": """0""", """1""": """1"""}
lowercase_ , lowercase_ : str = """""", """"""
lowercase_ : Optional[Any] = len(UpperCAmelCase__ )
for i in range(len(UpperCAmelCase__ ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
lowercase_ : Any = lexicon[curr_string]
result += last_match_id
lowercase_ : Optional[Any] = last_match_id + """0"""
if math.log2(UpperCAmelCase__ ).is_integer():
lowercase_ : Union[str, Any] = {}
for curr_key in list(UpperCAmelCase__ ):
lowercase_ : str = lexicon.pop(UpperCAmelCase__ )
lowercase_ : Optional[int] = new_lex
lowercase_ : Optional[Any] = last_match_id + """1"""
index += 1
lowercase_ : int = """"""
return result
def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : str ) -> None:
lowercase_ : List[str] = 8
try:
with open(UpperCAmelCase__ , """wb""" ) as opened_file:
lowercase_ : List[Any] = [
to_write[i : i + byte_length]
for i in range(0 , len(UpperCAmelCase__ ) , UpperCAmelCase__ )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append("""10000000""" )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array[:-1]:
opened_file.write(int(UpperCAmelCase__ , 2 ).to_bytes(1 , byteorder="""big""" ) )
except OSError:
print("""File not accessible""" )
sys.exit()
def lowerCamelCase ( UpperCAmelCase__ : str ) -> str:
lowercase_ : Optional[Any] = 0
for letter in data_bits:
if letter == "1":
break
counter += 1
lowercase_ : Tuple = data_bits[counter:]
lowercase_ : str = data_bits[counter + 1 :]
return data_bits
def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : str ) -> None:
lowercase_ : Union[str, Any] = read_file_binary(UpperCAmelCase__ )
lowercase_ : Optional[int] = remove_prefix(UpperCAmelCase__ )
lowercase_ : Tuple = decompress_data(UpperCAmelCase__ )
write_file_binary(UpperCAmelCase__ , UpperCAmelCase__ )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
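# Hedged usage note: despite the entry point being named `compress`, the pipeline above reads a
# binary file, strips the prefix added at compression time, applies Lempel-Ziv-style
# decompression and writes the result. Invocation takes a source path and a destination path
# (the script and file names below are illustrative):
#
#   python this_script.py compressed.lz restored.bin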
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def lowerCamelCase ( UpperCAmelCase__ : Callable , UpperCAmelCase__ : float , UpperCAmelCase__ : float , UpperCAmelCase__ : float , UpperCAmelCase__ : float ) -> np.array:
lowercase_ : Any = int(np.ceil((x_end - xa) / step_size ) )
lowercase_ : List[Any] = np.zeros((n + 1,) )
lowercase_ : List[Any] = ya
lowercase_ : List[str] = xa
for k in range(UpperCAmelCase__ ):
lowercase_ : Optional[Any] = y[k] + step_size * ode_func(UpperCAmelCase__ , y[k] )
lowercase_ : List[Any] = y[k] + (
(step_size / 2) * (ode_func(UpperCAmelCase__ , y[k] ) + ode_func(x + step_size , UpperCAmelCase__ ))
)
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
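# A minimal, hedged usage sketch for the integrator above (Heun's / improved Euler method).
# `euler_modified` stands in for the function defined above, and the ODE is an illustrative choice:
#
#   def ode_func(x: float, y: float) -> float:
#       return y  # dy/dx = y, exact solution y = exp(x)
#
#   # arguments in order: ode_func, y0, x0, step_size, x_end
#   ys = euler_modified(ode_func, 1.0, 0.0, 0.01, 1.0)
#   print(ys[-1])  # approximately 2.718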
'''simple docstring'''
import sys
_lowercase : Dict = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def lowerCamelCase ( UpperCAmelCase__ : str ) -> int:
lowercase_ : str = 1
for digit in s:
product *= int(UpperCAmelCase__ )
return product
def lowerCamelCase ( UpperCAmelCase__ : str = N ) -> int:
lowercase_ : Optional[int] = -sys.maxsize - 1
lowercase_ : str = n[:13]
lowercase_ : Optional[int] = 13
while cur_index < len(UpperCAmelCase__ ) - 13:
if int(n[cur_index] ) >= int(substr[0] ):
lowercase_ : Optional[int] = substr[1:] + n[cur_index]
cur_index += 1
else:
lowercase_ : Optional[int] = max(UpperCAmelCase__ , str_eval(UpperCAmelCase__ ) )
lowercase_ : Dict = n[cur_index : cur_index + 13]
cur_index += 13
return largest_product
if __name__ == "__main__":
print(f"""{solution() = }""")
'''simple docstring'''
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float = 1 / sqrt(2 ) ) -> IIRFilter:
lowercase_ : str = tau * frequency / samplerate
lowercase_ : Tuple = sin(UpperCAmelCase__ )
lowercase_ : int = cos(UpperCAmelCase__ )
lowercase_ : Any = _sin / (2 * q_factor)
lowercase_ : Dict = (1 - _cos) / 2
lowercase_ : Optional[int] = 1 - _cos
lowercase_ : Dict = 1 + alpha
lowercase_ : List[Any] = -2 * _cos
lowercase_ : Union[str, Any] = 1 - alpha
lowercase_ : List[Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float = 1 / sqrt(2 ) ) -> IIRFilter:
lowercase_ : str = tau * frequency / samplerate
lowercase_ : Optional[int] = sin(UpperCAmelCase__ )
lowercase_ : Dict = cos(UpperCAmelCase__ )
lowercase_ : Optional[int] = _sin / (2 * q_factor)
lowercase_ : Dict = (1 + _cos) / 2
lowercase_ : str = -1 - _cos
lowercase_ : Dict = 1 + alpha
lowercase_ : Optional[Any] = -2 * _cos
lowercase_ : List[Any] = 1 - alpha
lowercase_ : Union[str, Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float = 1 / sqrt(2 ) ) -> IIRFilter:
lowercase_ : int = tau * frequency / samplerate
lowercase_ : int = sin(UpperCAmelCase__ )
lowercase_ : Union[str, Any] = cos(UpperCAmelCase__ )
lowercase_ : str = _sin / (2 * q_factor)
lowercase_ : str = _sin / 2
lowercase_ : Any = 0
lowercase_ : Optional[Any] = -ba
lowercase_ : Dict = 1 + alpha
lowercase_ : Union[str, Any] = -2 * _cos
lowercase_ : Union[str, Any] = 1 - alpha
lowercase_ : Tuple = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float = 1 / sqrt(2 ) ) -> IIRFilter:
lowercase_ : List[str] = tau * frequency / samplerate
lowercase_ : Any = sin(UpperCAmelCase__ )
lowercase_ : List[Any] = cos(UpperCAmelCase__ )
lowercase_ : Optional[Any] = _sin / (2 * q_factor)
lowercase_ : Any = 1 - alpha
lowercase_ : Optional[Any] = -2 * _cos
lowercase_ : Optional[int] = 1 + alpha
lowercase_ : Dict = IIRFilter(2 )
filt.set_coefficients([ba, ba, ba] , [ba, ba, ba] )
return filt
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float , UpperCAmelCase__ : float = 1 / sqrt(2 ) , ) -> IIRFilter:
lowercase_ : Dict = tau * frequency / samplerate
lowercase_ : Tuple = sin(UpperCAmelCase__ )
lowercase_ : List[Any] = cos(UpperCAmelCase__ )
lowercase_ : List[Any] = _sin / (2 * q_factor)
lowercase_ : Any = 10 ** (gain_db / 40)
lowercase_ : List[str] = 1 + alpha * big_a
lowercase_ : List[Any] = -2 * _cos
lowercase_ : Dict = 1 - alpha * big_a
lowercase_ : str = 1 + alpha / big_a
lowercase_ : List[str] = -2 * _cos
lowercase_ : Tuple = 1 - alpha / big_a
lowercase_ : Any = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float , UpperCAmelCase__ : float = 1 / sqrt(2 ) , ) -> IIRFilter:
lowercase_ : Dict = tau * frequency / samplerate
lowercase_ : Union[str, Any] = sin(UpperCAmelCase__ )
lowercase_ : Any = cos(UpperCAmelCase__ )
lowercase_ : Any = _sin / (2 * q_factor)
lowercase_ : Any = 10 ** (gain_db / 40)
lowercase_ : Any = (big_a + 1) - (big_a - 1) * _cos
lowercase_ : int = (big_a + 1) + (big_a - 1) * _cos
lowercase_ : Tuple = (big_a - 1) - (big_a + 1) * _cos
lowercase_ : Optional[Any] = (big_a - 1) + (big_a + 1) * _cos
lowercase_ : int = 2 * sqrt(UpperCAmelCase__ ) * alpha
lowercase_ : Tuple = big_a * (pmc + aaa)
lowercase_ : List[str] = 2 * big_a * mpc
lowercase_ : Union[str, Any] = big_a * (pmc - aaa)
lowercase_ : Optional[int] = ppmc + aaa
lowercase_ : Optional[int] = -2 * pmpc
lowercase_ : Any = ppmc - aaa
lowercase_ : Optional[int] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float , UpperCAmelCase__ : float = 1 / sqrt(2 ) , ) -> IIRFilter:
lowercase_ : str = tau * frequency / samplerate
lowercase_ : int = sin(UpperCAmelCase__ )
lowercase_ : int = cos(UpperCAmelCase__ )
lowercase_ : Dict = _sin / (2 * q_factor)
lowercase_ : Union[str, Any] = 10 ** (gain_db / 40)
lowercase_ : Union[str, Any] = (big_a + 1) - (big_a - 1) * _cos
lowercase_ : Optional[int] = (big_a + 1) + (big_a - 1) * _cos
lowercase_ : Any = (big_a - 1) - (big_a + 1) * _cos
lowercase_ : str = (big_a - 1) + (big_a + 1) * _cos
lowercase_ : Optional[int] = 2 * sqrt(UpperCAmelCase__ ) * alpha
lowercase_ : Tuple = big_a * (ppmc + aaa)
lowercase_ : List[Any] = -2 * big_a * pmpc
lowercase_ : Optional[Any] = big_a * (ppmc - aaa)
lowercase_ : Optional[Any] = pmc + aaa
lowercase_ : int = 2 * mpc
lowercase_ : Tuple = pmc - aaa
lowercase_ : Union[str, Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
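# A hedged usage sketch: build a low-pass biquad with the first factory in this file and run it
# over a short sample stream. `make_lowpass` stands in for that factory's original name, and the
# `process` method is assumed from the imported audio_filters.iir_filter.IIRFilter:
#
#   lowpass = make_lowpass(1_000, 48_000)          # 1 kHz cutoff at a 48 kHz sample rate
#   samples = [0.0, 1.0, 0.5, -0.25, 0.0]
#   filtered = [lowpass.process(sample) for sample in samples]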
'''simple docstring'''
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class __magic_name__ ( unittest.TestCase):
def __init__( self : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : Optional[Any]=2 , lowercase_ : int=56 , lowercase_ : str=True , lowercase_ : int=True , lowercase_ : List[Any]=True , lowercase_ : Dict=True , lowercase_ : str=99 , lowercase_ : Optional[int]=32 , lowercase_ : List[str]=2 , lowercase_ : Dict=2 , lowercase_ : Tuple=7 , lowercase_ : List[str]="gelu_new" , lowercase_ : Optional[Any]=0.1 , lowercase_ : Any=0.1 , lowercase_ : Union[str, Any]=512 , lowercase_ : Any=16 , lowercase_ : Union[str, Any]=2 , lowercase_ : Dict=0.02 , lowercase_ : Tuple=4 , lowercase_ : int="block_sparse" , lowercase_ : Dict=True , lowercase_ : Union[str, Any]=False , lowercase_ : Optional[Any]=2 , lowercase_ : Any=3 , ):
lowercase_ : List[Any] = parent
lowercase_ : str = batch_size
lowercase_ : Any = seq_length
lowercase_ : Dict = is_training
lowercase_ : Union[str, Any] = use_attention_mask
lowercase_ : str = use_token_type_ids
lowercase_ : List[Any] = use_labels
lowercase_ : int = vocab_size
lowercase_ : Dict = hidden_size
lowercase_ : Union[str, Any] = num_hidden_layers
lowercase_ : int = num_attention_heads
lowercase_ : List[Any] = intermediate_size
lowercase_ : List[Any] = hidden_act
lowercase_ : Dict = hidden_dropout_prob
lowercase_ : Dict = attention_probs_dropout_prob
lowercase_ : str = max_position_embeddings
lowercase_ : Optional[int] = type_vocab_size
lowercase_ : Tuple = type_sequence_label_size
lowercase_ : Union[str, Any] = initializer_range
lowercase_ : List[str] = num_choices
lowercase_ : Tuple = rescale_embeddings
lowercase_ : Union[str, Any] = attention_type
lowercase_ : List[str] = use_bias
lowercase_ : Optional[Any] = block_size
lowercase_ : Union[str, Any] = num_random_blocks
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
lowercase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase_ : str = None
if self.use_attention_mask:
lowercase_ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
lowercase_ : List[str] = None
if self.use_token_type_ids:
lowercase_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase_ : Any = BigBirdConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase_ , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , )
return config, input_ids, token_type_ids, attention_mask
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
lowercase_ : int = self.prepare_config_and_inputs()
lowercase_ , lowercase_ , lowercase_ , lowercase_ : Dict = config_and_inputs
lowercase_ : Union[str, Any] = {
"""input_ids""": input_ids,
"""token_type_ids""": token_type_ids,
"""attention_mask""": attention_mask,
}
return config, inputs_dict
@require_flax
class __magic_name__ ( _UpperCAmelCase, unittest.TestCase):
UpperCamelCase__ = (
(
FlaxBigBirdForCausalLM,
FlaxBigBirdModel,
FlaxBigBirdForPreTraining,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
)
if is_flax_available()
else ()
)
UpperCamelCase__ = False
UpperCamelCase__ = False
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
lowercase_ : List[Any] = FlaxBigBirdModelTester(self )
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def SCREAMING_SNAKE_CASE_ ( self : Any ):
super().test_from_pretrained_save_pretrained()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
super().test_from_pretrained_with_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
super().test_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def SCREAMING_SNAKE_CASE_ ( self : int ):
super().test_hidden_states_output()
@slow
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
for model_class_name in self.all_model_classes:
lowercase_ : Optional[int] = model_class_name.from_pretrained("""google/bigbird-roberta-base""" )
self.assertIsNotNone(lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
if self.test_attn_probs:
super().test_attention_outputs()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
lowercase_ , lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowercase_ : Dict = self._prepare_for_class(lowercase_ , lowercase_ )
lowercase_ : Dict = model_class(lowercase_ )
@jax.jit
def model_jitted(lowercase_ : Optional[Any] , lowercase_ : Optional[int]=None , **lowercase_ : Optional[int] ):
return model(input_ids=lowercase_ , attention_mask=lowercase_ , **lowercase_ )
with self.subTest("""JIT Enabled""" ):
lowercase_ : List[Any] = model_jitted(**lowercase_ ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
lowercase_ : Optional[int] = model_jitted(**lowercase_ ).to_tuple()
self.assertEqual(len(lowercase_ ) , len(lowercase_ ) )
for jitted_output, output in zip(lowercase_ , lowercase_ ):
self.assertEqual(jitted_output.shape , output.shape )
def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : int , lowercase_ : Union[str, Any] , lowercase_ : List[Any] , lowercase_ : Optional[int]=1E-5 , lowercase_ : Optional[Any]="outputs" , lowercase_ : Any=None ):
# `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version,
# an effort was done to return `attention_probs` (yet to be verified).
if name.startswith("""outputs.attentions""" ):
return
else:
super().check_pt_flax_outputs(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
'''simple docstring'''
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
_lowercase : str = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class __magic_name__ ( datasets.BuilderConfig):
UpperCamelCase__ = None
def lowerCamelCase ( UpperCAmelCase__ : "pyspark.sql.DataFrame" , UpperCAmelCase__ : List[int] , ) -> str:
import pyspark
def generate_fn():
lowercase_ : List[str] = df.select("""*""" , pyspark.sql.functions.spark_partition_id().alias("""part_id""" ) )
for partition_id in partition_order:
lowercase_ : int = df_with_partition_id.select("""*""" ).where(F'''part_id = {partition_id}''' ).drop("""part_id""" )
lowercase_ : Any = partition_df.collect()
lowercase_ : Dict = 0
for row in rows:
yield F'''{partition_id}_{row_id}''', row.asDict()
row_id += 1
return generate_fn
class __magic_name__ ( _BaseExamplesIterable):
def __init__( self : int , lowercase_ : "pyspark.sql.DataFrame" , lowercase_ : Optional[int]=None , ):
lowercase_ : Dict = df
lowercase_ : Optional[Any] = partition_order or range(self.df.rdd.getNumPartitions() )
lowercase_ : Optional[Any] = _generate_iterable_examples(self.df , self.partition_order )
def __iter__( self : List[Any] ):
yield from self.generate_examples_fn()
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : np.random.Generator ):
lowercase_ : str = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(lowercase_ )
return SparkExamplesIterable(self.df , partition_order=lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : int , lowercase_ : int ):
lowercase_ : str = self.split_shard_indices_by_worker(lowercase_ , lowercase_ )
return SparkExamplesIterable(self.df , partition_order=lowercase_ )
@property
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
return len(self.partition_order )
class __magic_name__ ( datasets.DatasetBuilder):
UpperCamelCase__ = SparkConfig
def __init__( self : Tuple , lowercase_ : "pyspark.sql.DataFrame" , lowercase_ : str = None , lowercase_ : str = None , **lowercase_ : str , ):
import pyspark
lowercase_ : str = pyspark.sql.SparkSession.builder.getOrCreate()
lowercase_ : Optional[int] = df
lowercase_ : List[str] = working_dir
super().__init__(
cache_dir=lowercase_ , config_name=str(self.df.semanticHash() ) , **lowercase_ , )
def SCREAMING_SNAKE_CASE_ ( self : str ):
# Returns the path of the created file.
def create_cache_and_write_probe(lowercase_ : str ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir , exist_ok=lowercase_ )
lowercase_ : List[str] = os.path.join(self._cache_dir , """fs_test""" + uuid.uuida().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(lowercase_ , """a""" )
return [probe_file]
if self._spark.conf.get("""spark.master""" , """""" ).startswith("""local""" ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
lowercase_ : str = (
self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(lowercase_ ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
"""When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir""" )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
return datasets.DatasetInfo(features=self.config.features )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : datasets.download.download_manager.DownloadManager ):
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Union[str, Any] ):
import pyspark
def get_arrow_batch_size(lowercase_ : Any ):
for batch in it:
yield pa.RecordBatch.from_pydict({"""batch_bytes""": [batch.nbytes]} )
lowercase_ : Union[str, Any] = self.df.count()
lowercase_ : Union[str, Any] = df_num_rows if df_num_rows <= 100 else 100
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
lowercase_ : Any = (
self.df.limit(lowercase_ )
.repartition(1 )
.mapInArrow(lowercase_ , """batch_bytes: long""" )
.agg(pyspark.sql.functions.sum("""batch_bytes""" ).alias("""sample_bytes""" ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
lowercase_ : List[Any] = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
lowercase_ : Any = min(lowercase_ , int(approx_total_size / max_shard_size ) )
lowercase_ : Any = self.df.repartition(lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Any , lowercase_ : str , lowercase_ : str , lowercase_ : int , ):
import pyspark
lowercase_ : Any = ParquetWriter if file_format == """parquet""" else ArrowWriter
lowercase_ : Dict = os.path.join(self._working_dir , os.path.basename(lowercase_ ) ) if self._working_dir else fpath
lowercase_ : Optional[Any] = file_format == """parquet"""
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
lowercase_ : Tuple = self.config.features
lowercase_ : Any = self._writer_batch_size
lowercase_ : List[str] = self._fs.storage_options
def write_arrow(lowercase_ : str ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
lowercase_ : List[str] = pyspark.TaskContext().taskAttemptId()
lowercase_ : Dict = next(lowercase_ , lowercase_ )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )
lowercase_ : int = 0
lowercase_ : List[Any] = writer_class(
features=lowercase_ , path=working_fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , writer_batch_size=lowercase_ , storage_options=lowercase_ , embed_local_files=lowercase_ , )
lowercase_ : Optional[Any] = pa.Table.from_batches([first_batch] )
writer.write_table(lowercase_ )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
lowercase_ , lowercase_ : Dict = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )
shard_id += 1
lowercase_ : Any = writer_class(
features=writer._features , path=working_fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , writer_batch_size=lowercase_ , storage_options=lowercase_ , embed_local_files=lowercase_ , )
lowercase_ : List[str] = pa.Table.from_batches([batch] )
writer.write_table(lowercase_ )
if writer._num_bytes > 0:
lowercase_ , lowercase_ : str = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(lowercase_ ) ):
lowercase_ : Optional[Any] = os.path.join(os.path.dirname(lowercase_ ) , os.path.basename(lowercase_ ) )
shutil.move(lowercase_ , lowercase_ )
lowercase_ : Union[str, Any] = (
self.df.mapInArrow(lowercase_ , """task_id: long, num_examples: long, num_bytes: long""" )
.groupBy("""task_id""" )
.agg(
pyspark.sql.functions.sum("""num_examples""" ).alias("""total_num_examples""" ) , pyspark.sql.functions.sum("""num_bytes""" ).alias("""total_num_bytes""" ) , pyspark.sql.functions.count("""num_bytes""" ).alias("""num_shards""" ) , pyspark.sql.functions.collect_list("""num_examples""" ).alias("""shard_lengths""" ) , )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : "datasets.SplitGenerator" , lowercase_ : str = "arrow" , lowercase_ : Optional[Union[str, int]] = None , lowercase_ : Optional[int] = None , **lowercase_ : List[str] , ):
self._validate_cache_dir()
lowercase_ : int = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(lowercase_ )
lowercase_ : Tuple = not is_remote_filesystem(self._fs )
lowercase_ : int = os.path.join if is_local else posixpath.join
lowercase_ : Dict = """-TTTTT-SSSSS-of-NNNNN"""
lowercase_ : Dict = f'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}'''
lowercase_ : Optional[int] = path_join(self._output_dir , lowercase_ )
lowercase_ : Any = 0
lowercase_ : Tuple = 0
lowercase_ : int = 0
lowercase_ : Dict = []
lowercase_ : Union[str, Any] = []
for task_id, content in self._prepare_split_single(lowercase_ , lowercase_ , lowercase_ ):
(
(
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) ,
) : Union[str, Any] = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(lowercase_ )
lowercase_ : List[str] = total_num_examples
lowercase_ : int = total_num_bytes
# should rename everything at the end
logger.debug(f'''Renaming {total_shards} shards.''' )
if total_shards > 1:
lowercase_ : Tuple = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
lowercase_ : Dict = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
lowercase_ : int , lowercase_ : int , lowercase_ : int , ):
rename(
lowercase_ , fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , fpath.replace("""TTTTT-SSSSS""" , f'''{global_shard_id:05d}''' ).replace("""NNNNN""" , f'''{total_shards:05d}''' ) , )
lowercase_ : Union[str, Any] = []
lowercase_ : Tuple = 0
for i in range(len(lowercase_ ) ):
lowercase_ , lowercase_ : List[Any] = task_id_and_num_shards[i]
for shard_id in range(lowercase_ ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(lowercase_ , len(lowercase_ ) ).map(lambda lowercase_ : _rename_shard(*lowercase_ ) ).collect()
else:
# don't use any pattern
lowercase_ : List[str] = 0
lowercase_ : Optional[Any] = task_id_and_num_shards[0][0]
self._rename(
fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , fpath.replace(lowercase_ , """""" ) , )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : "datasets.SplitGenerator" , ):
return SparkExamplesIterable(self.df )
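# A hedged usage sketch for the Spark-backed builder above. In the public `datasets` API this
# machinery is normally reached through `Dataset.from_spark`; the toy DataFrame is illustrative:
#
#   from pyspark.sql import SparkSession
#   from datasets import Dataset
#
#   spark = SparkSession.builder.getOrCreate()
#   df = spark.createDataFrame([("hello",), ("world",)], schema="text string")
#   ds = Dataset.from_spark(df)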