import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import Mask2FormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import Mask2FormerForUniversalSegmentation, Mask2FormerModel

    if is_vision_available():
        from transformers import Mask2FormerImageProcessor

if is_vision_available():
    from PIL import Image

class Mask2FormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 8,
        max_size=32 * 8,
        num_labels=4,
        hidden_dim=64,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        self.mask_feature_size = hidden_dim

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )

        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)

        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()

        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def get_config(self):
        config = Mask2FormerConfig(
            hidden_size=self.hidden_dim,
        )
        config.num_queries = self.num_queries
        config.num_labels = self.num_labels
        config.backbone_config.depths = [1, 1, 1, 1]
        config.backbone_config.num_channels = self.num_channels
        config.encoder_feedforward_dim = 64
        config.dim_feedforward = 128
        config.hidden_dim = self.hidden_dim
        config.mask_feature_size = self.hidden_dim
        config.feature_size = self.hidden_dim
        return config

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, _, _ = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states

        self.parent.assertEqual(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertEqual(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertEqual(len(transformer_decoder_hidden_states), config.decoder_layers)

    def create_and_check_mask2former_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = Mask2FormerModel(config=config)
            model.to(torch_device)
            model.eval()

            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)

        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.hidden_dim),
        )
        # let's ensure the other two hidden states exist
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)

        if output_hidden_states:
            self.check_output_hidden_state(output, config)

    def create_and_check_mask2former_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = Mask2FormerForUniversalSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)

            comm_check_on_output(result)

            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
            )

            comm_check_on_output(result)

        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))

@require_torch
class Mask2FormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (Mask2FormerModel, Mask2FormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": Mask2FormerModel} if is_torch_available() else {}

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False

    def setUp(self):
        self.model_tester = Mask2FormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=Mask2FormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mask2former_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_mask2former_model(config, **inputs_dict, output_hidden_states=False)

    def test_mask2former_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mask2former_instance_segmentation_head_model(*config_and_inputs)

    @unittest.skip(reason="Mask2Former does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Mask2Former does not have a get_input_embeddings method")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="Mask2Former is not a generative model")
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason="Mask2Former does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`"
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
            model = Mask2FormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }
        config = self.model_tester.get_config()

        model = Mask2FormerForUniversalSegmentation(config).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_mask2former_model(config, **inputs_dict, output_hidden_states=True)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs_dict, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()

        model = model_class(config)
        model.to(torch_device)
        model.train()

        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()

    def test_retain_grad_hidden_states_attentions(self):
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True

        model = model_class(config).to(torch_device)
        model.train()

        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)

        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()

        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()

        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()

        attentions = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=True)

        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)

TOLERANCE = 1e-4


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_vision
@slow
class Mask2FormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def model_checkpoints(self):
        return "facebook/mask2former-swin-small-coco-instance"

    @cached_property
    def default_image_processor(self):
        return Mask2FormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None

    def test_inference_no_head(self):
        model = Mask2FormerModel.from_pretrained(self.model_checkpoints).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice_hidden_state = torch.tensor(
            [[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

    def test_inference_universal_segmentation_head(self):
        model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape, (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4)
        )
        expected_slice = [
            [-8.7839, -9.0056, -8.8121],
            [-7.4104, -7.0313, -6.5401],
            [-6.6105, -6.3427, -6.4675],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))

        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(class_queries_logits.shape, (1, model.config.num_queries, model.config.num_labels + 1))
        expected_slice = torch.tensor(
            [
                [1.8324, -8.0835, -4.1922],
                [0.8450, -9.0050, -3.6053],
                [0.3045, -7.7293, -3.0275],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_with_segmentation_maps_and_loss(self):
        model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor

        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
            return_tensors="pt",
        )

        inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
        inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
        inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]

        with torch.no_grad():
            outputs = model(**inputs)

        self.assertTrue(outputs.loss is not None)
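

For context outside the test harness, here is a minimal inference sketch against the same checkpoint the integration tests pin. It is an illustration, not part of the original test file; the image path is a placeholder, and `post_process_instance_segmentation` is the image processor's standard post-processing entry point.

import torch
from PIL import Image

from transformers import Mask2FormerForUniversalSegmentation, Mask2FormerImageProcessor

processor = Mask2FormerImageProcessor.from_pretrained("facebook/mask2former-swin-small-coco-instance")
model = Mask2FormerForUniversalSegmentation.from_pretrained("facebook/mask2former-swin-small-coco-instance").eval()

image = Image.open("cats.png")  # placeholder path
inputs = processor(image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# Collapse the per-query mask/class logits into a single instance segmentation map
result = processor.post_process_instance_segmentation(outputs, target_sizes=[image.size[::-1]])[0]
print(result["segmentation"].shape, len(result["segments_info"]))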
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
    "169M": 12,
    "430M": 24,
    "1B5": 24,
    "3B": 32,
    "7B": 32,
    "14B": 40,
}

HIDDEN_SIZE_MAPPING = {
    "169M": 768,
    "430M": 1024,
    "1B5": 2048,
    "3B": 2560,
    "7B": 4096,
    "14B": 5120,
}

def convert_state_dict(state_dict):
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_receptance
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")
        if name != "head.weight":
            name = "rwkv." + name
        state_dict[name] = weight
    return state_dict
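

As a quick sanity check, this is what the renaming does to a few representative RWKV keys (toy tensors, not a real checkpoint):

import torch

toy_state_dict = {
    "emb.weight": torch.zeros(2, 2),
    "blocks.0.ln0.weight": torch.zeros(2),
    "blocks.3.att.time_mix_k": torch.zeros(2),
    "head.weight": torch.zeros(2, 2),
}
print(sorted(convert_state_dict(toy_state_dict).keys()))
# ['head.weight', 'rwkv.blocks.0.pre_ln.weight',
#  'rwkv.blocks.3.attention.time_mix_key', 'rwkv.embeddings.weight']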
def convert_rwkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer.")
        vocab_size = 50277
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)

    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
    if size not in possible_sizes:
        raise ValueError(f"`size` should be one of {possible_sizes}, got {size}.")

    config = RwkvConfig(
        vocab_size=vocab_size,
        num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size],
        hidden_size=HIDDEN_SIZE_MAPPING[size],
    )
    config.save_pretrained(output_dir)

    # 3. Download the model file, then convert the state dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="cpu")
    state_dict = convert_state_dict(state_dict)

    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))

    if index is not None:
        save_index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(save_index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)

    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state dict)
    print(
        "Cleaning up shards. This may error with an OOM error, if this is the case don't worry you still have converted the model."
    )
    shard_files = list(shards.keys())

    del state_dict
    del shards
    gc.collect()

    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))

    del state_dict
    gc.collect()

    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub.")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="2GB")
        tokenizer.push_to_hub(model_name)

if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--repo_id', default=None, type=str, required=True, help='Repo ID from which to pull the checkpoint.'
)
parser.add_argument(
'--checkpoint_file', default=None, type=str, required=True, help='Name of the checkpoint file in the repo.'
)
parser.add_argument(
'--output_dir', default=None, type=str, required=True, help='Where to save the converted model.'
)
parser.add_argument(
'--tokenizer_file',
default=None,
type=str,
help='Path to the tokenizer file to use (if not provided, only the model is converted).',
)
parser.add_argument(
'--size',
default=None,
type=str,
help='Size of the model. Will be inferred from the `checkpoint_file` if not passed.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Push to the Hub the converted model.',
)
parser.add_argument(
'--model_name',
default=None,
type=str,
help='Name of the pushed model on the Hub, including the username / organization.',
)
lowerCamelCase__ = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
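

Called as a plain function rather than through argparse, a conversion looks like this; the repo and file names below are illustrative values, not verified checkpoints:

convert_rwkv_checkpoint_to_hf_format(
    repo_id="BlinkDL/rwkv-4-pile-169m",      # hypothetical Hub repo
    checkpoint_file="RWKV-4-Pile-169M.pth",  # hypothetical file name in that repo
    output_dir="./rwkv-169m-hf",
    size="169M",
)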
from abc import ABC, abstractmethod
from typing import Optional, Union

from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike


class AbstractDatasetReader(ABC):
    def __init__(
        self,
        path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths, dict) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        pass


class AbstractDatasetInputStream(ABC):
    def __init__(
        self,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
        pass
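

A concrete reader only needs to wire a packaged builder into `__init__` and implement `read()`. The sketch below is a simplified model of how the CSV reader in `datasets` does it; the import path and the builder wiring are assumptions, and the real implementation also threads download configs and `num_proc` through.

from datasets.packaged_modules.csv.csv import Csv


class CsvDatasetReader(AbstractDatasetReader):
    def __init__(self, path_or_paths, split=None, features=None, cache_dir=None,
                 keep_in_memory=False, streaming=False, num_proc=None, **kwargs):
        super().__init__(path_or_paths, split=split, features=features, cache_dir=cache_dir,
                         keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs)
        data_files = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Csv(cache_dir=cache_dir, data_files=data_files, features=features, **kwargs)

    def read(self):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        # Simplified: build the Arrow dataset on disk, then load the requested split.
        self.builder.download_and_prepare()
        return self.builder.as_dataset(split=self.split, in_memory=self.keep_in_memory)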
from __future__ import annotations

import numpy as np


def relu(vector: list[float]):
    return np.maximum(0, vector)


if __name__ == "__main__":
    print(np.array(relu([-1, 0, 5])))  # --> [0, 0, 5]
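

Because `np.maximum` broadcasts, the same function works element-wise on nested arrays too. A leaky variant, shown here as a common extension rather than part of the original snippet, only changes the slope applied to negative entries:

def leaky_relu(vector, alpha: float = 0.01):
    # np.where keeps positive entries and scales negative ones by alpha
    vector = np.asarray(vector)
    return np.where(vector > 0, vector, alpha * vector)


print(relu([[1.0, -2.0], [-0.5, 3.0]]))  # --> [[1. 0.] [0. 3.]]
print(leaky_relu([-1.0, 0.0, 5.0]))      # --> [-0.01  0.    5.  ]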
from __future__ import annotations

import typing
from collections.abc import Iterable

import numpy as np

Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007


def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))


def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)


if __name__ == "__main__":

    def benchmark() -> None:
        from timeit import timeit

        print("Without Numpy")
        print(
            timeit(
                "euclidean_distance_no_np([1, 2, 3], [4, 5, 6])",
                number=10_000,
                globals=globals(),
            )
        )
        print("With Numpy")
        print(
            timeit(
                "euclidean_distance([1, 2, 3], [4, 5, 6])",
                number=10_000,
                globals=globals(),
            )
        )

    benchmark()
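

The NumPy version is equivalent to taking `np.linalg.norm` of the difference vector, which is usually the idiomatic spelling; a quick check, assuming the functions above are in scope:

point_a, point_b = np.array([1, 2, 3]), np.array([4, 5, 6])
assert np.isclose(euclidean_distance(point_a, point_b), np.linalg.norm(point_a - point_b))
assert np.isclose(euclidean_distance_no_np(point_a, point_b), np.linalg.norm(point_a - point_b))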
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union

import numpy as np
import torch

from .utils import deprecate, is_transformers_available


if is_transformers_available():
    import transformers


def set_seed(seed: int):
    """Helper function for reproducible behavior: sets the seed in `random`, `numpy` and `torch`."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # ^^ safe to call this function even if cuda is not available


class EMAModel:
    """Exponential Moving Average of model weights."""

    def __init__(
        self,
        parameters: Iterable[torch.nn.Parameter],
        decay: float = 0.9999,
        min_decay: float = 0.0,
        update_after_step: int = 0,
        use_ema_warmup: bool = False,
        inv_gamma: Union[float, int] = 1.0,
        power: Union[float, int] = 2 / 3,
        model_cls: Optional[Any] = None,
        model_config: Dict[str, Any] = None,
        **kwargs,
    ):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()

            # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
            use_ema_warmup = True

        if kwargs.get("max_value", None) is not None:
            deprecation_message = "The `max_value` argument is deprecated. Please use `decay` instead."
            deprecate("max_value", "1.0.0", deprecation_message, standard_warn=False)
            decay = kwargs["max_value"]

        if kwargs.get("min_value", None) is not None:
            deprecation_message = "The `min_value` argument is deprecated. Please use `min_decay` instead."
            deprecate("min_value", "1.0.0", deprecation_message, standard_warn=False)
            min_decay = kwargs["min_value"]

        parameters = list(parameters)
        self.shadow_params = [p.clone().detach() for p in parameters]

        if kwargs.get("device", None) is not None:
            deprecation_message = "The `device` argument is deprecated. Please use `to` instead."
            deprecate("device", "1.0.0", deprecation_message, standard_warn=False)
            self.to(device=kwargs["device"])

        self.temp_stored_params = None

        self.decay = decay
        self.min_decay = min_decay
        self.update_after_step = update_after_step
        self.use_ema_warmup = use_ema_warmup
        self.inv_gamma = inv_gamma
        self.power = power
        self.optimization_step = 0
        self.cur_decay_value = None  # set in `step()`

        self.model_cls = model_cls
        self.model_config = model_config

    @classmethod
    def from_pretrained(cls, path, model_cls) -> "EMAModel":
        _, ema_kwargs = model_cls.load_config(path, return_unused_kwargs=True)
        model = model_cls.from_pretrained(path)

        ema_model = cls(model.parameters(), model_cls=model_cls, model_config=model.config)

        ema_model.load_state_dict(ema_kwargs)
        return ema_model

    def save_pretrained(self, path):
        if self.model_cls is None:
            raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__.")

        if self.model_config is None:
            raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__.")

        model = self.model_cls.from_config(self.model_config)
        state_dict = self.state_dict()
        state_dict.pop("shadow_params", None)

        model.register_to_config(**state_dict)
        self.copy_to(model.parameters())
        model.save_pretrained(path)

    def get_decay(self, optimization_step: int) -> float:
        """Compute the decay factor for the exponential moving average."""
        step = max(0, optimization_step - self.update_after_step - 1)

        if step <= 0:
            return 0.0

        if self.use_ema_warmup:
            cur_decay_value = 1 - (1 + step / self.inv_gamma) ** -self.power
        else:
            cur_decay_value = (1 + step) / (10 + step)

        cur_decay_value = min(cur_decay_value, self.decay)
        # make sure decay is not smaller than min_decay
        cur_decay_value = max(cur_decay_value, self.min_decay)
        return cur_decay_value

    @torch.no_grad()
    def step(self, parameters: Iterable[torch.nn.Parameter]):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage.step`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()

        parameters = list(parameters)

        self.optimization_step += 1

        # Compute the decay factor for the exponential moving average.
        decay = self.get_decay(self.optimization_step)
        self.cur_decay_value = decay
        one_minus_decay = 1 - decay

        context_manager = contextlib.nullcontext
        if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
            import deepspeed

        for s_param, param in zip(self.shadow_params, parameters):
            if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
                context_manager = deepspeed.zero.GatheredParameters(param, modifier_rank=None)

            with context_manager():
                if param.requires_grad:
                    s_param.sub_(one_minus_decay * (s_param - param))
                else:
                    s_param.copy_(param)

    def copy_to(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        parameters = list(parameters)
        for s_param, param in zip(self.shadow_params, parameters):
            param.data.copy_(s_param.to(param.device).data)

    def to(self, device=None, dtype=None) -> None:
        self.shadow_params = [
            p.to(device=device, dtype=dtype) if p.is_floating_point() else p.to(device=device)
            for p in self.shadow_params
        ]

    def state_dict(self) -> dict:
        return {
            "decay": self.decay,
            "min_decay": self.min_decay,
            "optimization_step": self.optimization_step,
            "update_after_step": self.update_after_step,
            "use_ema_warmup": self.use_ema_warmup,
            "inv_gamma": self.inv_gamma,
            "power": self.power,
            "shadow_params": self.shadow_params,
        }

    def store(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        self.temp_stored_params = [param.detach().cpu().clone() for param in parameters]

    def restore(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        if self.temp_stored_params is None:
            raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights to `restore()`")
        for c_param, param in zip(self.temp_stored_params, parameters):
            param.data.copy_(c_param.data)

        # Better memory-wise.
        self.temp_stored_params = None

    def load_state_dict(self, state_dict: dict) -> None:
        state_dict = copy.deepcopy(state_dict)

        self.decay = state_dict.get("decay", self.decay)
        if self.decay < 0.0 or self.decay > 1.0:
            raise ValueError("Decay must be between 0 and 1")

        self.min_decay = state_dict.get("min_decay", self.min_decay)
        if not isinstance(self.min_decay, float):
            raise ValueError("Invalid min_decay")

        self.optimization_step = state_dict.get("optimization_step", self.optimization_step)
        if not isinstance(self.optimization_step, int):
            raise ValueError("Invalid optimization_step")

        self.update_after_step = state_dict.get("update_after_step", self.update_after_step)
        if not isinstance(self.update_after_step, int):
            raise ValueError("Invalid update_after_step")

        self.use_ema_warmup = state_dict.get("use_ema_warmup", self.use_ema_warmup)
        if not isinstance(self.use_ema_warmup, bool):
            raise ValueError("Invalid use_ema_warmup")

        self.inv_gamma = state_dict.get("inv_gamma", self.inv_gamma)
        if not isinstance(self.inv_gamma, (float, int)):
            raise ValueError("Invalid inv_gamma")

        self.power = state_dict.get("power", self.power)
        if not isinstance(self.power, (float, int)):
            raise ValueError("Invalid power")

        shadow_params = state_dict.get("shadow_params", None)
        if shadow_params is not None:
            self.shadow_params = shadow_params
            if not isinstance(self.shadow_params, list):
                raise ValueError("shadow_params must be a list")
            if not all(isinstance(p, torch.Tensor) for p in self.shadow_params):
                raise ValueError("shadow_params must all be Tensors")
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional

import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode

import transformers
from transformers import (
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    ViTImageProcessor,
    ViTMAEConfig,
    ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version


logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risk.
check_min_version("4.31.0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")


@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None, metadata={"help": "The column name of the images in the files."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["val"] = self.validation_dir
        self.data_files = data_files if data_files else None


@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    mask_ratio: float = field(
        default=0.75, metadata={"help": "The ratio of the number of masked tokens in the input sequence."}
    )
    norm_pix_loss: bool = field(
        default=True, metadata={"help": "Whether or not to train with normalized pixel values as target."}
    )


@dataclass
class CustomTrainingArguments(TrainingArguments):
    base_learning_rate: float = field(
        default=1e-3, metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."}
    )


def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    return {"pixel_values": pixel_values}


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mae", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]

    # Load pretrained model and image processor
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = ViTMAEConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = ViTMAEConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = ViTMAEConfig()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    # adapt config
    config.update(
        {
            "mask_ratio": model_args.mask_ratio,
            "norm_pix_loss": model_args.norm_pix_loss,
        }
    )

    # create image processor
    if model_args.image_processor_name:
        image_processor = ViTImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = ViTImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        image_processor = ViTImageProcessor()

    # create model
    if model_args.model_name_or_path:
        model = ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = ViTMAEForPreTraining(config)

    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]

    # transformations as done in original MAE paper
    # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
    if "shortest_edge" in image_processor.size:
        size = image_processor.size["shortest_edge"]
    else:
        size = (image_processor.size["height"], image_processor.size["width"])
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
            RandomResizedCrop(size, scale=(0.2, 1.0), interpolation=InterpolationMode.BICUBIC),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ]
    )

    def preprocess_images(examples):
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        return examples

    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)

    # Compute absolute learning rate
    total_train_batch_size = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        training_args.learning_rate = training_args.base_learning_rate * total_train_batch_size / 256

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "tasks": "masked-auto-encoding",
        "dataset": data_args.dataset_name,
        "tags": ["masked-auto-encoding"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
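

The script is driven by `HfArgumentParser`, so it accepts every standard `TrainingArguments` flag plus the dataclass fields defined above. A representative launch, with illustrative values:

# python run_mae.py \
#     --dataset_name cifar10 \
#     --output_dir ./vit-mae-demo \
#     --do_train --do_eval \
#     --base_learning_rate 1.5e-4 \
#     --mask_ratio 0.75 \
#     --norm_pix_loss \
#     --per_device_train_batch_size 8 \
#     --num_train_epochs 1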
import argparse

import torch

from transformers import BertForMaskedLM


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Extract some layers of the full BertForMaskedLM or RobertaForMaskedLM for Transfer Learned"
            " Distillation"
        )
    )
    parser.add_argument("--model_type", default="bert", choices=["bert"])
    parser.add_argument("--model_name", default="bert-base-uncased", type=str)
    parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_bert-base-uncased_0247911.pth", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()

    if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = "bert"
    else:
        raise ValueError('args.model_type should be "bert".')

    state_dict = model.state_dict()
    compressed_sd = {}

    for w in ["word_embeddings", "position_embeddings"]:
        compressed_sd[f"distilbert.embeddings.{w}.weight"] = state_dict[f"{prefix}.embeddings.{w}.weight"]
    for w in ["weight", "bias"]:
        compressed_sd[f"distilbert.embeddings.LayerNorm.{w}"] = state_dict[f"{prefix}.embeddings.LayerNorm.{w}"]

    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ["weight", "bias"]:
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"
            ]
        std_idx += 1

    compressed_sd["vocab_projector.weight"] = state_dict["cls.predictions.decoder.weight"]
    compressed_sd["vocab_projector.bias"] = state_dict["cls.predictions.bias"]
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            compressed_sd[f"vocab_transform.{w}"] = state_dict[f"cls.predictions.transform.dense.{w}"]
            compressed_sd[f"vocab_layer_norm.{w}"] = state_dict[f"cls.predictions.transform.LayerNorm.{w}"]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")

    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
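

A representative invocation; the script file name and the dump path are illustrative, not taken from the source:

# python extract_distilbert.py \
#     --model_type bert \
#     --model_name bert-base-uncased \
#     --dump_checkpoint serialization_dir/distilbert_init.pth \
#     --vocab_transform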
def compute_ap(l):  # noqa: E741
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)


# Adjacency list of graph
data = {
    0: [1, 2],
    1: [0, 2],
    2: [0, 1, 3, 5],
    3: [2, 4],
    4: [3],
    5: [2, 6, 8],
    6: [5, 7],
    7: [6, 8],
    8: [5, 7],
}
compute_ap(data)
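# For reference, worked out by hand for the adjacency list above: removing
# vertex 2 separates {0, 1} from the rest, removing 3 isolates 4, and removing
# 5 cuts off the 6-7-8 cycle, so the driver should print 2, 3 and 5.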
from __future__ import annotations

graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue

        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        if target_vertex == self.source_vertex:
            return self.source_vertex

        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            raise ValueError(msg)

        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"


if __name__ == "__main__":
    g = Graph(graph, "G")
    g.breath_first_search()
    print(g.shortest_path("D"))
    print(g.shortest_path("G"))
    print(g.shortest_path("Foo"))
from __future__ import annotations

from decimal import Decimal
from math import *  # noqa: F403

from sympy import diff


def newton_raphson(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    """Finds the root of `func` by the Newton-Raphson method, starting from the point `a`."""
    x = a
    while True:
        x = Decimal(x) - (Decimal(eval(func)) / Decimal(eval(str(diff(func)))))  # noqa: S307
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)


# Let's Execute
if __name__ == "__main__":
    # Find root of trigonometric function (the root of sin(x) = 0 near 2 is pi)
    print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")
    # Find root of polynomial
    print(f"The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}")
    # Find root of log(x) - 1 = 0 (i.e. the value of e)
    print(f"The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}")
    # Find root of exp(x) - 1 = 0
    print(f"The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}")
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Blip2Processor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"
def __init__( self : Any , lowerCamelCase__ : Tuple , lowerCamelCase__ : int ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = False
super().__init__(lowerCamelCase__ , lowerCamelCase__ )
_UpperCAmelCase : Tuple = self.image_processor
def __call__( self : Dict , lowerCamelCase__ : ImageInput = None , lowerCamelCase__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , lowerCamelCase__ : bool = True , lowerCamelCase__ : Union[bool, str, PaddingStrategy] = False , lowerCamelCase__ : Union[bool, str, TruncationStrategy] = None , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : int = 0 , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : Optional[bool] = None , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = True , lowerCamelCase__ : Optional[Union[str, TensorType]] = None , **lowerCamelCase__ : Tuple , ) ->BatchEncoding:
'''simple docstring'''
if images is None and text is None:
raise ValueError("You have to specify either images or text." )
# Get only text
if images is None:
_UpperCAmelCase : Optional[int] = self.tokenizer
_UpperCAmelCase : List[Any] = self.tokenizer(
text=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , max_length=lowerCamelCase__ , stride=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , return_overflowing_tokens=lowerCamelCase__ , return_special_tokens_mask=lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , return_token_type_ids=lowerCamelCase__ , return_length=lowerCamelCase__ , verbose=lowerCamelCase__ , return_tensors=lowerCamelCase__ , **lowerCamelCase__ , )
return text_encoding
# add pixel_values
_UpperCAmelCase : Optional[int] = self.image_processor(lowerCamelCase__ , return_tensors=lowerCamelCase__ )
if text is not None:
_UpperCAmelCase : Dict = self.tokenizer(
text=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , max_length=lowerCamelCase__ , stride=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , return_overflowing_tokens=lowerCamelCase__ , return_special_tokens_mask=lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , return_token_type_ids=lowerCamelCase__ , return_length=lowerCamelCase__ , verbose=lowerCamelCase__ , return_tensors=lowerCamelCase__ , **lowerCamelCase__ , )
else:
_UpperCAmelCase : int = None
if text_encoding is not None:
encoding_image_processor.update(lowerCamelCase__ )
return encoding_image_processor
def lowerCAmelCase__ ( self : List[Any] , *lowerCamelCase__ : Optional[int] , **lowerCamelCase__ : Dict ) ->Optional[Any]:
'''simple docstring'''
return self.tokenizer.batch_decode(*lowerCamelCase__ , **lowerCamelCase__ )
def lowerCAmelCase__ ( self : int , *lowerCamelCase__ : Dict , **lowerCamelCase__ : str ) ->Optional[int]:
'''simple docstring'''
return self.tokenizer.decode(*lowerCamelCase__ , **lowerCamelCase__ )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def lowerCAmelCase__ ( self : Any ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = self.tokenizer.model_input_names
_UpperCAmelCase : Dict = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 40
| 0
|
'''simple docstring'''
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class GeneratorDatasetInputStream(AbstractDatasetInputStream):
    def __init__(self, generator: Callable, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, streaming: bool = False, gen_kwargs: Optional[dict] = None, num_proc: Optional[int] = None, **kwargs) -> None:
        super().__init__(
            features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs,
        )
        self.builder = Generator(
            cache_dir=cache_dir, features=features, generator=generator, gen_kwargs=gen_kwargs, **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split="train")
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset
| 711
|
'''simple docstring'''
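# Articulation points (cut vertices) via DFS with low-link values: a non-root
# vertex `at` is an articulation point if some child `to` cannot reach an
# ancestor of `at` (at <= low[to]); the DFS root is one iff it has more than
# one DFS child (out_edge_count > 1).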
def compute_ap(l):  # noqa: E741
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at
        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])
                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)
# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
| 40
| 0
|
'''simple docstring'''
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
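# Shannon entropy H = -sum(p(x) * log2(p(x))), computed over single characters
# and over two-character sequences; the difference between the two estimates
# the extra information carried by the second character of each pair.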
def calculate_prob(text: str) -> None:
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())
    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.
    # print entropy
    print(F"""{round(-1 * my_fir_sum ):.1f}""")
    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            sequence = cha + chb
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)
    # print second entropy
    print(F"""{round(-1 * my_sec_sum ):.1f}""")
    # print the difference between them
    print(F"""{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}""")


def analyze_text(text: str) -> tuple[Counter, Counter]:
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1
    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings


def main():
    import doctest

    doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
| 712
|
'''simple docstring'''
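# Project Euler 48: find the last ten digits of 1**1 + 2**2 + ... + 1000**1000.
# Python's arbitrary-precision integers allow the direct, exact summation.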
def solution() -> str:
    total = 0
    for i in range(1, 1_001):
        total += i**i
    return str(total)[-10:]
if __name__ == "__main__":
print(solution())
| 40
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import _LazyModule
_import_structure = {'tokenization_tapex': ['TapexTokenizer']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 713
|
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
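# Nearest-neighbour search: for each query vector, linearly scan the dataset
# and keep the row minimising the Euclidean distance sqrt(sum((a - b) ** 2)).
# cosine_similarity, dot(a, b) / (|a| * |b|), is provided as an alternative.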
def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list[list[list[float] | float]]:
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            F"""dataset : {dataset.ndim}, value_array : {value_array.ndim}"""
        )
        raise ValueError(msg)
    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                F"""dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"""
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")
    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            F"""dataset : {dataset.dtype}, value_array : {value_array.dtype}"""
        )
        raise TypeError(msg)
    answer = []
    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist])
    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 40
| 0
|
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'asapp/sew-d-tiny-100k': 'https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json',
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class SEWDConfig(PretrainedConfig):
    model_type = "sew-d"
def __init__( self : Union[str, Any] , lowerCamelCase__ : Optional[int]=32 , lowerCamelCase__ : Any=7_68 , lowerCamelCase__ : int=12 , lowerCamelCase__ : str=12 , lowerCamelCase__ : List[str]=30_72 , lowerCamelCase__ : Any=2 , lowerCamelCase__ : List[str]=5_12 , lowerCamelCase__ : List[str]=2_56 , lowerCamelCase__ : List[str]=True , lowerCamelCase__ : int=True , lowerCamelCase__ : Tuple=("p2c", "c2p") , lowerCamelCase__ : List[Any]="layer_norm" , lowerCamelCase__ : Optional[int]="gelu_python" , lowerCamelCase__ : List[Any]=0.1 , lowerCamelCase__ : Union[str, Any]=0.1 , lowerCamelCase__ : List[Any]=0.1 , lowerCamelCase__ : Union[str, Any]=0.0 , lowerCamelCase__ : Tuple=0.1 , lowerCamelCase__ : Tuple=0.0_2 , lowerCamelCase__ : Any=1E-7 , lowerCamelCase__ : Dict=1E-5 , lowerCamelCase__ : List[Any]="group" , lowerCamelCase__ : Tuple="gelu" , lowerCamelCase__ : List[Any]=(64, 1_28, 1_28, 1_28, 1_28, 2_56, 2_56, 2_56, 2_56, 5_12, 5_12, 5_12, 5_12) , lowerCamelCase__ : List[Any]=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , lowerCamelCase__ : Tuple=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , lowerCamelCase__ : Tuple=False , lowerCamelCase__ : Tuple=1_28 , lowerCamelCase__ : Dict=16 , lowerCamelCase__ : int=True , lowerCamelCase__ : List[Any]=0.0_5 , lowerCamelCase__ : Union[str, Any]=10 , lowerCamelCase__ : Optional[int]=2 , lowerCamelCase__ : Tuple=0.0 , lowerCamelCase__ : Any=10 , lowerCamelCase__ : Optional[int]=0 , lowerCamelCase__ : Dict="mean" , lowerCamelCase__ : List[Any]=False , lowerCamelCase__ : int=False , lowerCamelCase__ : Tuple=2_56 , lowerCamelCase__ : Dict=0 , lowerCamelCase__ : List[str]=1 , lowerCamelCase__ : List[str]=2 , **lowerCamelCase__ : Union[str, Any] , ) ->Optional[int]:
'''simple docstring'''
super().__init__(**lowerCamelCase__ , pad_token_id=lowerCamelCase__ , bos_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ )
_UpperCAmelCase : str = hidden_size
_UpperCAmelCase : Any = feat_extract_norm
_UpperCAmelCase : Optional[Any] = feat_extract_activation
_UpperCAmelCase : Any = list(lowerCamelCase__ )
_UpperCAmelCase : Tuple = list(lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = list(lowerCamelCase__ )
_UpperCAmelCase : List[str] = conv_bias
_UpperCAmelCase : Tuple = num_conv_pos_embeddings
_UpperCAmelCase : Tuple = num_conv_pos_embedding_groups
_UpperCAmelCase : Tuple = len(self.conv_dim )
_UpperCAmelCase : List[str] = num_hidden_layers
_UpperCAmelCase : Optional[int] = intermediate_size
_UpperCAmelCase : Tuple = squeeze_factor
_UpperCAmelCase : Any = max_position_embeddings
_UpperCAmelCase : Tuple = position_buckets
_UpperCAmelCase : Any = share_att_key
_UpperCAmelCase : str = relative_attention
_UpperCAmelCase : Union[str, Any] = norm_rel_ebd
_UpperCAmelCase : List[str] = list(lowerCamelCase__ )
_UpperCAmelCase : Dict = hidden_act
_UpperCAmelCase : List[str] = num_attention_heads
_UpperCAmelCase : Any = hidden_dropout
_UpperCAmelCase : int = attention_dropout
_UpperCAmelCase : Union[str, Any] = activation_dropout
_UpperCAmelCase : int = feat_proj_dropout
_UpperCAmelCase : List[str] = final_dropout
_UpperCAmelCase : Dict = layer_norm_eps
_UpperCAmelCase : List[Any] = feature_layer_norm_eps
_UpperCAmelCase : List[str] = initializer_range
_UpperCAmelCase : Any = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect."
"It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
F"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"""
F"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_UpperCAmelCase : List[str] = apply_spec_augment
_UpperCAmelCase : List[str] = mask_time_prob
_UpperCAmelCase : Tuple = mask_time_length
_UpperCAmelCase : List[str] = mask_time_min_masks
_UpperCAmelCase : Tuple = mask_feature_prob
_UpperCAmelCase : Optional[Any] = mask_feature_length
_UpperCAmelCase : List[str] = mask_feature_min_masks
# ctc loss
_UpperCAmelCase : Dict = ctc_loss_reduction
_UpperCAmelCase : Tuple = ctc_zero_infinity
# sequence classification
_UpperCAmelCase : List[str] = use_weighted_layer_sum
_UpperCAmelCase : Optional[Any] = classifier_proj_size
@property
def lowerCAmelCase__ ( self : Optional[Any] ) ->List[Any]:
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 714
|
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
lowerCamelCase__ = ' \"""\n Output class for the scheduler\'s step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"""\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n'
class CopyCheckTester(unittest.TestCase):
def lowerCAmelCase__ ( self : Any ) ->Any:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , "schedulers/" ) )
_UpperCAmelCase : Optional[Any] = self.diffusers_dir
shutil.copy(
os.path.join(lowerCamelCase__ , "src/diffusers/schedulers/scheduling_ddpm.py" ) , os.path.join(self.diffusers_dir , "schedulers/scheduling_ddpm.py" ) , )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->str:
'''simple docstring'''
_UpperCAmelCase : int = "src/diffusers"
shutil.rmtree(self.diffusers_dir )
def lowerCAmelCase__ ( self : str , lowerCamelCase__ : List[str] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Any=None ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = comment + F"""\nclass {class_name}(nn.Module):\n""" + class_code
if overwrite_result is not None:
_UpperCAmelCase : Tuple = comment + F"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
_UpperCAmelCase : Optional[Any] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_19 )
_UpperCAmelCase : Tuple = black.format_str(lowerCamelCase__ , mode=lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = os.path.join(self.diffusers_dir , "new_code.py" )
with open(lowerCamelCase__ , "w" , newline="\n" ) as f:
f.write(lowerCamelCase__ )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(lowerCamelCase__ ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=lowerCamelCase__ )
with open(lowerCamelCase__ , "r" ) as f:
self.assertTrue(f.read() , lowerCamelCase__ )
def lowerCAmelCase__ ( self : str ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase : Tuple = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput" )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Optional[int]:
'''simple docstring'''
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , REFERENCE_CODE + "\n" , )
# With no empty line at the end
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , lowerCamelCase__ , )
# Copy consistency with rename
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , re.sub("DDPM" , "Test" , lowerCamelCase__ ) , )
# Copy consistency with a really long name
_UpperCAmelCase : int = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
self.check_copy_consistency(
F"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , F"""{long_class_name}SchedulerOutput""" , re.sub("Bert" , lowerCamelCase__ , lowerCamelCase__ ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , lowerCamelCase__ , overwrite_result=re.sub("DDPM" , "Test" , lowerCamelCase__ ) , )
| 40
| 0
|
'''simple docstring'''
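# Array-backed min-heap over Node objects.  idx_of_element maps each node to
# its current slot in the heap array so decrease_key can find and re-sift a
# node in O(log n); heap_dict maps node names to their values.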
class Node:
    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return F"""{self.__class__.__name__}({self.name}, {self.val})"""

    def __lt__(self, other):
        return self.val < other.val


class MinHeap:
    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)
        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val
        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    def sift_down(self, idx, array):
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)
            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r
            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                ) = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        return self.heap[0]

    def remove(self):
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )
        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "new value must be less than current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])


r = Node('R', -1)
b = Node('B', 6)
a = Node('A', 3)
x = Node('X', 1)
e = Node('E', 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print('Min Heap - before decrease key')
for i in my_min_heap.heap:
print(i)
print('Min Heap - After decrease key of node [B -> -17]')
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 715
|
'''simple docstring'''
from math import factorial
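# Dual numbers a + b*E with E**2 = 0 give forward-mode automatic
# differentiation: evaluating f(Dual(x, 1)) carries Taylor coefficients along,
# so the n-th derivative of f at x equals duals[n - 1] * factorial(n).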
class Dual:
    def __init__(self, real, rank):
        self.real = real
        if isinstance(rank, int):
            self.duals = [1] * rank
        else:
            self.duals = rank

    def __repr__(self):
        return (
            F"""{self.real}+"""
            F"""{'+'.join(str(dual) + 'E' + str(n + 1) for n, dual in enumerate(self.duals))}"""
        )

    def reduce(self):
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)

    def __add__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        if len(s_dual) > len(o_dual):
            o_dual.extend([1] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([1] * (len(o_dual) - len(s_dual)))
        new_duals = []
        for i in range(len(s_dual)):
            new_duals.append(s_dual[i] + o_dual[i])
        return Dual(self.real + other.real, new_duals)

    __radd__ = __add__

    def __sub__(self, other):
        return self + other * -1

    def __mul__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other)
            return Dual(self.real * other, new_duals)
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)

    __rmul__ = __mul__

    def __truediv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other)
            return Dual(self.real / other, new_duals)
        raise ValueError

    def __floordiv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other)
            return Dual(self.real // other, new_duals)
        raise ValueError

    def __pow__(self, n):
        if n < 0 or isinstance(n, float):
            raise ValueError("power must be a positive integer")
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x


def differentiate(func, position, order):
    if not callable(func):
        raise ValueError("differentiate() requires a function as input for func")
    if not isinstance(position, (float, int)):
        raise ValueError("differentiate() requires a float as input for position")
    if not isinstance(order, int):
        raise ValueError("differentiate() requires an int as input for order")
    d = Dual(position, 1)
    result = func(d)
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    def f(y):
        return y**2 * y**4

    print(differentiate(f, 9, 2))
| 40
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_funnel': ['FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FunnelConfig'],
'convert_funnel_original_tf_checkpoint_to_pytorch': [],
'tokenization_funnel': ['FunnelTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_funnel_fast"] = ['FunnelTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_funnel"] = [
'FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST',
'FunnelBaseModel',
'FunnelForMaskedLM',
'FunnelForMultipleChoice',
'FunnelForPreTraining',
'FunnelForQuestionAnswering',
'FunnelForSequenceClassification',
'FunnelForTokenClassification',
'FunnelModel',
'FunnelPreTrainedModel',
'load_tf_weights_in_funnel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_funnel"] = [
'TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFFunnelBaseModel',
'TFFunnelForMaskedLM',
'TFFunnelForMultipleChoice',
'TFFunnelForPreTraining',
'TFFunnelForQuestionAnswering',
'TFFunnelForSequenceClassification',
'TFFunnelForTokenClassification',
'TFFunnelModel',
'TFFunnelPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 716
|
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
lowerCamelCase__ = abspath(join(dirname(dirname(dirname(__file__))), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
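# The functions below are pytest plugin hooks; they must keep the hook names
# pytest_addoption / pytest_terminal_summary to be picked up by pytest.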
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 40
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}
class GPTBigCodeConfig(PretrainedConfig):
    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(
        self,
        vocab_size=50_257,
        n_positions=1_024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50_256,
        eos_token_id=50_256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
| 717
|
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
def __init__( self : int , lowerCamelCase__ : str , lowerCamelCase__ : str=13 , lowerCamelCase__ : Dict=7 , lowerCamelCase__ : str=True , lowerCamelCase__ : List[str]=True , lowerCamelCase__ : Dict=True , lowerCamelCase__ : int=True , lowerCamelCase__ : Tuple=99 , lowerCamelCase__ : Optional[int]=32 , lowerCamelCase__ : str=5 , lowerCamelCase__ : List[Any]=4 , lowerCamelCase__ : Any=37 , lowerCamelCase__ : List[Any]="gelu" , lowerCamelCase__ : int=0.1 , lowerCamelCase__ : Tuple=0.1 , lowerCamelCase__ : Optional[int]=5_12 , lowerCamelCase__ : Any=16 , lowerCamelCase__ : Optional[Any]=2 , lowerCamelCase__ : Optional[Any]=0.0_2 , lowerCamelCase__ : Optional[int]=4 , ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : str = parent
_UpperCAmelCase : Optional[int] = batch_size
_UpperCAmelCase : List[Any] = seq_length
_UpperCAmelCase : Dict = is_training
_UpperCAmelCase : int = use_attention_mask
_UpperCAmelCase : List[Any] = use_token_type_ids
_UpperCAmelCase : int = use_labels
_UpperCAmelCase : str = vocab_size
_UpperCAmelCase : Tuple = hidden_size
_UpperCAmelCase : Dict = num_hidden_layers
_UpperCAmelCase : List[Any] = num_attention_heads
_UpperCAmelCase : Tuple = intermediate_size
_UpperCAmelCase : List[Any] = hidden_act
_UpperCAmelCase : Union[str, Any] = hidden_dropout_prob
_UpperCAmelCase : List[str] = attention_probs_dropout_prob
_UpperCAmelCase : Optional[int] = max_position_embeddings
_UpperCAmelCase : Tuple = type_vocab_size
_UpperCAmelCase : int = type_sequence_label_size
_UpperCAmelCase : List[str] = initializer_range
_UpperCAmelCase : Union[str, Any] = num_choices
def lowerCAmelCase__ ( self : int ) ->Any:
'''simple docstring'''
_UpperCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase : Any = None
if self.use_attention_mask:
_UpperCAmelCase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase : int = None
if self.use_token_type_ids:
_UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase : Tuple = RobertaPreLayerNormConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowerCAmelCase__ ( self : Dict ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : int = config_and_inputs
_UpperCAmelCase : Optional[Any] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
def lowerCAmelCase__ ( self : int ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = config_and_inputs
_UpperCAmelCase : List[Any] = True
_UpperCAmelCase : List[str] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
_UpperCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
lowerCAmelCase : Tuple = True
lowerCAmelCase : Tuple = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCAmelCase__ ( self : Tuple ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : str = FlaxRobertaPreLayerNormModelTester(self )
@slow
def lowerCAmelCase__ ( self : Optional[int] ) ->int:
'''simple docstring'''
for model_class_name in self.all_model_classes:
_UpperCAmelCase : Any = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=lowerCamelCase__ )
_UpperCAmelCase : Tuple = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowerCamelCase__ )
@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
@slow
def lowerCAmelCase__ ( self : Tuple ) ->str:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=lowerCamelCase__ )
_UpperCAmelCase : str = np.array([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] , dtype=jnp.intaa )
_UpperCAmelCase : Tuple = model(lowerCamelCase__ )[0]
_UpperCAmelCase : int = [1, 11, 5_02_65]
self.assertEqual(list(output.shape ) , lowerCamelCase__ )
# compare the actual values for a slice.
_UpperCAmelCase : int = np.array(
[[[4_0.4_8_8_0, 1_8.0_1_9_9, -5.2_3_6_7], [-1.8_8_7_7, -4.0_8_8_5, 1_0.7_0_8_5], [-2.2_6_1_3, -5.6_1_1_0, 7.2_6_6_5]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , lowerCamelCase__ , atol=1E-4 ) )
@slow
def lowerCAmelCase__ ( self : Optional[Any] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : Any = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=lowerCamelCase__ )
_UpperCAmelCase : List[Any] = np.array([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] , dtype=jnp.intaa )
_UpperCAmelCase : Optional[Any] = model(lowerCamelCase__ )[0]
# compare the actual values for a slice.
_UpperCAmelCase : str = np.array(
[[[0.0_2_0_8, -0.0_3_5_6, 0.0_2_3_7], [-0.1_5_6_9, -0.0_4_1_1, -0.2_6_2_6], [0.1_8_7_9, 0.0_1_2_5, -0.0_0_8_9]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , lowerCamelCase__ , atol=1E-4 ) )
| 40
| 0
|
'''simple docstring'''
from collections import Counter
from timeit import timeit
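# A string can be rearranged into a palindrome iff at most one distinct
# character occurs an odd number of times (ignoring spaces and case).  Both
# implementations below test exactly that condition.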
def __lowerCAmelCase (__lowerCAmelCase = "" , ):
return sum(c % 2 for c in Counter(input_str.replace(" " , "" ).lower() ).values() ) < 2
def __lowerCAmelCase (__lowerCAmelCase = "" ):
if len(__lowerCAmelCase ) == 0:
return True
_UpperCAmelCase : Tuple = input_str.replace(" " , "" ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
_UpperCAmelCase : dict[str, int] = {}
for character in lower_case_input_str:
_UpperCAmelCase : int = character_freq_dict.get(__lowerCAmelCase , 0 ) + 1
_UpperCAmelCase : Dict = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
def __lowerCAmelCase (__lowerCAmelCase = "" ):
print("\nFor string = " , __lowerCAmelCase , ":" )
print(
"> can_string_be_rearranged_as_palindrome_counter()" , "\tans =" , can_string_be_rearranged_as_palindrome_counter(__lowerCAmelCase ) , "\ttime =" , timeit(
"z.can_string_be_rearranged_as_palindrome_counter(z.check_str)" , setup="import __main__ as z" , ) , "seconds" , )
print(
"> can_string_be_rearranged_as_palindrome()" , "\tans =" , can_string_be_rearranged_as_palindrome(__lowerCAmelCase ) , "\ttime =" , timeit(
"z.can_string_be_rearranged_as_palindrome(z.check_str)" , setup="import __main__ as z" , ) , "seconds" , )
if __name__ == "__main__":
lowerCamelCase__ = input(
'Enter string to determine if it can be rearranged as a palindrome or not: '
).strip()
benchmark(check_str)
lowerCamelCase__ = can_string_be_rearranged_as_palindrome_counter(check_str)
print(F'''{check_str} can {"" if status else "not "}be rearranged as a palindrome''')
| 718
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_instructblip': [
'INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'InstructBlipConfig',
'InstructBlipQFormerConfig',
'InstructBlipVisionConfig',
],
'processing_instructblip': ['InstructBlipProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_instructblip"] = [
'INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'InstructBlipQFormerModel',
'InstructBlipPreTrainedModel',
'InstructBlipForConditionalGeneration',
'InstructBlipVisionModel',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 40
| 0
|
'''simple docstring'''
from __future__ import annotations
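# Longest increasing (non-decreasing) subsequence by brute-force recursion:
# take the head as pivot, recurse on the elements that remain >= the chosen
# starting point, and keep whichever candidate subsequence is longer.
# Exponential in the worst case, which is fine for small inputs.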
def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
| 719
|
'''simple docstring'''
import os
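# Project Euler 13: compute the first ten digits of the sum of the numbers
# stored one per line in num.txt; exact big-integer summation, then slice.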
def solution() -> str:
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]
if __name__ == "__main__":
print(solution())
| 40
| 0
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {'vocab_file': 'sentencepiece.bpe.model'}
lowerCamelCase__ = {
'vocab_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'
),
},
}
lowerCamelCase__ = {
'moussaKam/mbarthez': 1_024,
'moussaKam/barthez': 1_024,
'moussaKam/barthez-orangesum-title': 1_024,
}
lowerCamelCase__ = '▁'
class BarthezTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self : int , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Optional[Any]="<s>" , lowerCamelCase__ : Optional[int]="</s>" , lowerCamelCase__ : Tuple="</s>" , lowerCamelCase__ : Union[str, Any]="<s>" , lowerCamelCase__ : Optional[int]="<unk>" , lowerCamelCase__ : List[str]="<pad>" , lowerCamelCase__ : Any="<mask>" , lowerCamelCase__ : Optional[Dict[str, Any]] = None , **lowerCamelCase__ : List[Any] , ) ->None:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else mask_token
_UpperCAmelCase : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase__ , )
_UpperCAmelCase : Tuple = vocab_file
_UpperCAmelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowerCamelCase__ ) )
_UpperCAmelCase : List[Any] = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
_UpperCAmelCase : List[str] = len(self.sp_model ) - 1
_UpperCAmelCase : str = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def lowerCAmelCase__ ( self : Union[str, Any] , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None ) ->List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_UpperCAmelCase : Optional[int] = [self.cls_token_id]
_UpperCAmelCase : List[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCAmelCase__ ( self : Optional[Any] , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None , lowerCamelCase__ : bool = False ) ->List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase__ , token_ids_a=lowerCamelCase__ , already_has_special_tokens=lowerCamelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase__ )) + [1]
return [1] + ([0] * len(lowerCamelCase__ )) + [1, 1] + ([0] * len(lowerCamelCase__ )) + [1]
def lowerCAmelCase__ ( self : Union[str, Any] , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None ) ->List[int]:
'''simple docstring'''
_UpperCAmelCase : List[str] = [self.sep_token_id]
_UpperCAmelCase : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def lowerCAmelCase__ ( self : int ) ->Dict:
'''simple docstring'''
return len(self.sp_model )
def lowerCAmelCase__ ( self : List[Any] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : Dict = {self.convert_ids_to_tokens(lowerCamelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : str ) ->List[str]:
'''simple docstring'''
return self.sp_model.encode(lowerCamelCase__ , out_type=lowerCamelCase__ )
def lowerCAmelCase__ ( self : Tuple , lowerCamelCase__ : Dict ) ->str:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_UpperCAmelCase : Optional[int] = self.sp_model.PieceToId(lowerCamelCase__ )
return spm_id if spm_id else self.unk_token_id
def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : int ) ->Optional[int]:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(lowerCamelCase__ )
def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : int ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : List[str] = []
_UpperCAmelCase : Tuple = ""
_UpperCAmelCase : Optional[Any] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowerCamelCase__ ) + token
_UpperCAmelCase : str = True
_UpperCAmelCase : List[Any] = []
else:
current_sub_tokens.append(lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = False
out_string += self.sp_model.decode(lowerCamelCase__ )
return out_string.strip()
def __getstate__( self : Dict ) ->str:
'''simple docstring'''
_UpperCAmelCase : int = self.__dict__.copy()
_UpperCAmelCase : Optional[Any] = None
return state
def __setstate__( self : Tuple , lowerCamelCase__ : List[Any] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
_UpperCAmelCase : str = {}
_UpperCAmelCase : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowerCAmelCase__ ( self : Any , lowerCamelCase__ : str , lowerCamelCase__ : Optional[str] = None ) ->Tuple[str]:
'''simple docstring'''
if not os.path.isdir(lowerCamelCase__ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_UpperCAmelCase : Any = os.path.join(
lowerCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCamelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCamelCase__ , "wb" ) as fi:
_UpperCAmelCase : Optional[int] = self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase__ )
return (out_vocab_file,)
| 720
|
'''simple docstring'''
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
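# HumanEval harness: each task prompt is tokenized and replicated n_copies
# times, the model generates until one of the EOF_STRINGS appears (enforced by
# EndOfFunctionCriteria), and completions are truncated at the last full block
# before being scored with the code_eval metric.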
EOF_STRINGS = ['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif']
class TokenizedDataset(IterableDataset):
    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generate commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors="pt")
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }
class lowerCAmelCase__ ( UpperCAmelCase__ ):
def __init__( self : Optional[int] , lowerCamelCase__ : Tuple , lowerCamelCase__ : Any , lowerCamelCase__ : Dict ) ->Any:
'''simple docstring'''
_UpperCAmelCase : List[str] = start_length
_UpperCAmelCase : Union[str, Any] = eof_strings
_UpperCAmelCase : Union[str, Any] = tokenizer
def __call__( self : Tuple , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : List[str] , **lowerCamelCase__ : Optional[int] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Tuple = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
_UpperCAmelCase : Optional[int] = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(lowerCamelCase__ )
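# A minimal sketch (added, not part of the original script) of the batch-level
# stopping rule above: generation halts only once every decoded continuation
# contains at least one of the EOF strings.
_demo_eof = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
_demo_decoded = ["    return x\nclass Foo", "    return y\nprint(y)"]
assert all(any(s in d for s in _demo_eof) for d in _demo_decoded)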
def __lowerCAmelCase (__lowerCAmelCase ):
_UpperCAmelCase : Tuple = re.split("(%s)" % "|".join(__lowerCAmelCase ) , __lowerCAmelCase )
# the last split element should be "", since generation stops right after a stop string
return "".join(string_list[:-2] )
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=20 , **__lowerCAmelCase ):
_UpperCAmelCase : Tuple = defaultdict(__lowerCAmelCase ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(__lowerCAmelCase ) ):
with torch.no_grad():
_UpperCAmelCase : Tuple = batch["ids"].shape[-1]
_UpperCAmelCase : Optional[int] = accelerator.unwrap_model(__lowerCAmelCase ).generate(
input_ids=batch["ids"][:, : batch["input_len"]] , num_return_sequences=__lowerCAmelCase , **__lowerCAmelCase )
# each task is generated batch_size times
_UpperCAmelCase : str = batch["task_id"].repeat(__lowerCAmelCase )
_UpperCAmelCase : str = accelerator.pad_across_processes(
__lowerCAmelCase , dim=1 , pad_index=tokenizer.pad_token_id )
_UpperCAmelCase , _UpperCAmelCase : int = accelerator.gather((generated_tokens, generated_tasks) )
_UpperCAmelCase : Dict = generated_tokens.cpu().numpy()
_UpperCAmelCase : Dict = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(__lowerCAmelCase , __lowerCAmelCase ):
gen_token_dict[task].append(__lowerCAmelCase )
_UpperCAmelCase : int = [[] for _ in range(__lowerCAmelCase )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
_UpperCAmelCase : List[Any] = tokenizer.decode(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase )
code_gens[task].append(remove_last_block(__lowerCAmelCase ) )
return code_gens
def __lowerCAmelCase ():
# Setup configuration
_UpperCAmelCase : List[str] = HfArgumentParser(__lowerCAmelCase )
_UpperCAmelCase : Tuple = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
_UpperCAmelCase : Any = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
_UpperCAmelCase : List[str] = "false"
if args.num_workers is None:
_UpperCAmelCase : List[str] = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
_UpperCAmelCase : List[Any] = Accelerator()
set_seed(args.seed , device_specific=__lowerCAmelCase )
# Load model and tokenizer
_UpperCAmelCase : Tuple = AutoTokenizer.from_pretrained(args.model_ckpt )
_UpperCAmelCase : List[str] = tokenizer.eos_token
_UpperCAmelCase : str = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
_UpperCAmelCase : Tuple = {
"do_sample": args.do_sample,
"temperature": args.temperature,
"max_new_tokens": args.max_new_tokens,
"top_p": args.top_p,
"top_k": args.top_k,
"stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0 , __lowerCAmelCase , __lowerCAmelCase )] ),
}
# Load evaluation dataset and metric
_UpperCAmelCase : Union[str, Any] = load_dataset("openai_humaneval" )
_UpperCAmelCase : List[Any] = load_metric("code_eval" )
_UpperCAmelCase : Optional[Any] = args.num_tasks if args.num_tasks is not None else len(human_eval["test"] )
_UpperCAmelCase : Any = args.n_samples // args.batch_size
_UpperCAmelCase : Tuple = TokenizedDataset(__lowerCAmelCase , human_eval["test"] , n_copies=__lowerCAmelCase , n_tasks=__lowerCAmelCase )
# do not confuse args.batch_size, which is actually the num_return_sequences
_UpperCAmelCase : List[str] = DataLoader(__lowerCAmelCase , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
_UpperCAmelCase : Optional[int] = code_eval_metric.compute(references=[""] , predictions=[[""]] )
except ValueError as exception:
print(
"Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`"
" flag to enable code evaluation." )
raise exception
_UpperCAmelCase , _UpperCAmelCase : Any = accelerator.prepare(__lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase : Dict = complete_code(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , n_tasks=__lowerCAmelCase , batch_size=args.batch_size , **__lowerCAmelCase , )
if accelerator.is_main_process:
_UpperCAmelCase : List[Any] = []
for task in tqdm(range(__lowerCAmelCase ) ):
_UpperCAmelCase : str = human_eval["test"][task]["test"]
_UpperCAmelCase : Union[str, Any] = F"""check({human_eval['test'][task]['entry_point']})"""
references.append("\n" + test_func + "\n" + entry_point )
# Evaluate completions with "code_eval" metric
_UpperCAmelCase , _UpperCAmelCase : str = code_eval_metric.compute(
references=__lowerCAmelCase , predictions=__lowerCAmelCase , num_workers=args.num_workers )
print(F"""Results: {pass_at_k}""" )
# Save results to json file
with open(args.output_file , "w" ) as fp:
json.dump(__lowerCAmelCase , __lowerCAmelCase )
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
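# Hypothetical invocation (added; the script filename and flag names are
# inferred from the HumanEvalArguments fields accessed above, which
# HfArgumentParser exposes as CLI flags):
#   python human_eval.py --model_ckpt <model-or-checkpoint> --n_samples 40 \
#       --batch_size 10 --temperature 0.2 --HF_ALLOW_CODE_EVAL 1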
| 40
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/resolve/main/config.json',
'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/config.json',
'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json',
'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json',
'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/config.json',
'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json',
}
class lowerCAmelCase__ ( UpperCAmelCase__ ):
lowerCAmelCase : Any = "bloom"
lowerCAmelCase : Any = ["past_key_values"]
lowerCAmelCase : Union[str, Any] = {
"num_hidden_layers": "n_layer",
"num_attention_heads": "n_head",
}
def __init__( self : Any , lowerCamelCase__ : List[Any]=25_08_80 , lowerCamelCase__ : List[str]=64 , lowerCamelCase__ : List[str]=2 , lowerCamelCase__ : List[Any]=8 , lowerCamelCase__ : Any=1E-5 , lowerCamelCase__ : str=0.0_2 , lowerCamelCase__ : List[Any]=True , lowerCamelCase__ : str=1 , lowerCamelCase__ : Tuple=2 , lowerCamelCase__ : Any=False , lowerCamelCase__ : Union[str, Any]=0.0 , lowerCamelCase__ : Optional[Any]=0.0 , lowerCamelCase__ : Optional[Any]=1 , lowerCamelCase__ : List[str]=False , **lowerCamelCase__ : Union[str, Any] , ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase : Dict = vocab_size
# Backward compatibility with n_embed kwarg
_UpperCAmelCase : Optional[Any] = kwargs.pop("n_embed" , lowerCamelCase__ )
_UpperCAmelCase : int = hidden_size if n_embed is None else n_embed
_UpperCAmelCase : Optional[Any] = n_layer
_UpperCAmelCase : Dict = n_head
_UpperCAmelCase : Union[str, Any] = layer_norm_epsilon
_UpperCAmelCase : List[Any] = initializer_range
_UpperCAmelCase : Optional[Any] = use_cache
_UpperCAmelCase : Dict = pretraining_tp
_UpperCAmelCase : int = apply_residual_connection_post_layernorm
_UpperCAmelCase : str = hidden_dropout
_UpperCAmelCase : List[Any] = attention_dropout
_UpperCAmelCase : int = bos_token_id
_UpperCAmelCase : List[str] = eos_token_id
_UpperCAmelCase : List[str] = slow_but_exact
super().__init__(bos_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ , **lowerCamelCase__ )
class lowerCAmelCase__ ( UpperCAmelCase__ ):
lowerCAmelCase : Optional[Any] = version.parse("1.12" )
def __init__( self : List[Any] , lowerCamelCase__ : PretrainedConfig , lowerCamelCase__ : str = "default" , lowerCamelCase__ : List[PatchingSpec] = None , lowerCamelCase__ : bool = False , ) ->int:
'''simple docstring'''
super().__init__(lowerCamelCase__ , task=lowerCamelCase__ , patching_specs=lowerCamelCase__ , use_past=lowerCamelCase__ )
if not getattr(self._config , "pad_token_id" , lowerCamelCase__ ):
# TODO: how to do that better?
_UpperCAmelCase : Union[str, Any] = 0
@property
def lowerCAmelCase__ ( self : Optional[int] ) ->Mapping[str, Mapping[int, str]]:
'''simple docstring'''
_UpperCAmelCase : int = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} )
if self.use_past:
# BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
self.fill_with_past_key_values_(lowerCamelCase__ , direction="inputs" , inverted_values_shape=lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = {0: "batch", 1: "past_sequence + sequence"}
else:
_UpperCAmelCase : Optional[int] = {0: "batch", 1: "sequence"}
return common_inputs
@property
def lowerCAmelCase__ ( self : Dict ) ->int:
'''simple docstring'''
return self._config.n_layer
@property
def lowerCAmelCase__ ( self : Union[str, Any] ) ->int:
'''simple docstring'''
return self._config.n_head
@property
def lowerCAmelCase__ ( self : Any ) ->float:
'''simple docstring'''
return 1E-3
def lowerCAmelCase__ ( self : Union[str, Any] , lowerCamelCase__ : "PreTrainedTokenizer" , lowerCamelCase__ : int = -1 , lowerCamelCase__ : int = -1 , lowerCamelCase__ : bool = False , lowerCamelCase__ : Optional["TensorType"] = None , ) ->Mapping[str, Any]:
'''simple docstring'''
_UpperCAmelCase : List[str] = super(lowerCamelCase__ , self ).generate_dummy_inputs(
lowerCamelCase__ , batch_size=lowerCamelCase__ , seq_length=lowerCamelCase__ , is_pair=lowerCamelCase__ , framework=lowerCamelCase__ )
# We need to order the inputs in the way they appear in forward()
_UpperCAmelCase : Optional[int] = OrderedDict({"input_ids": common_inputs["input_ids"]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
_UpperCAmelCase : List[str] = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
_UpperCAmelCase : Optional[Any] = seqlen + 2
_UpperCAmelCase : int = self._config.hidden_size // self.num_attention_heads
_UpperCAmelCase : Optional[int] = (
batch * self.num_attention_heads,
head_dim,
past_key_values_length,
)
_UpperCAmelCase : str = (
batch * self.num_attention_heads,
past_key_values_length,
head_dim,
)
_UpperCAmelCase : Optional[int] = [
(torch.zeros(lowerCamelCase__ ), torch.zeros(lowerCamelCase__ )) for _ in range(self.num_layers )
]
_UpperCAmelCase : str = common_inputs["attention_mask"]
if self.use_past:
_UpperCAmelCase : str = ordered_inputs["attention_mask"].dtype
_UpperCAmelCase : Optional[Any] = torch.cat(
[ordered_inputs["attention_mask"], torch.ones(lowerCamelCase__ , lowerCamelCase__ , dtype=lowerCamelCase__ )] , dim=1 )
return ordered_inputs
@property
def lowerCAmelCase__ ( self : List[Any] ) ->int:
'''simple docstring'''
return 13
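# A shape sketch (added, not part of the original file) of the fused BLOOM
# past_key_values layout built by generate_dummy_inputs above: keys are
# (batch * n_head, head_dim, past_len) and values are (batch * n_head, past_len, head_dim).
if is_torch_available():
    import torch
    _batch, _n_head, _head_dim, _past_len = 2, 8, 16, 5
    _past_key = torch.zeros(_batch * _n_head, _head_dim, _past_len)    # torch.Size([16, 16, 5])
    _past_value = torch.zeros(_batch * _n_head, _past_len, _head_dim)  # torch.Size([16, 5, 16])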
| 721
|
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 40
| 0
|
'''simple docstring'''
import os
from collections.abc import Iterator
def __lowerCAmelCase (__lowerCAmelCase = "." ):
for dir_path, dir_names, filenames in os.walk(__lowerCAmelCase ):
_UpperCAmelCase : List[Any] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(__lowerCAmelCase )[1] in (".py", ".ipynb"):
yield os.path.join(__lowerCAmelCase , __lowerCAmelCase ).lstrip("./" )
def __lowerCAmelCase (__lowerCAmelCase ):
return F"""{i * ' '}*""" if i else "\n##"
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase : List[str] = old_path.split(os.sep )
for i, new_part in enumerate(new_path.split(os.sep ) ):
if (i + 1 > len(__lowerCAmelCase ) or old_parts[i] != new_part) and new_part:
print(F"""{md_prefix(__lowerCAmelCase )} {new_part.replace('_' , ' ' ).title()}""" )
return new_path
def __lowerCAmelCase (__lowerCAmelCase = "." ):
_UpperCAmelCase : Tuple = ""
for filepath in sorted(good_file_paths(__lowerCAmelCase ) ):
_UpperCAmelCase : List[Any] = os.path.split(__lowerCAmelCase )
if filepath != old_path:
_UpperCAmelCase : Union[str, Any] = print_path(__lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase : Optional[Any] = (filepath.count(os.sep ) + 1) if filepath else 0
_UpperCAmelCase : Tuple = F"""{filepath}/{filename}""".replace(" " , "%20" )
_UpperCAmelCase : Any = os.path.splitext(filename.replace("_" , " " ).title() )[0]
print(F"""{md_prefix(__lowerCAmelCase )} [{filename}]({url})""" )
if __name__ == "__main__":
print_directory_md('.')
| 700
|
'''simple docstring'''
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None ):
if version.parse(hfh.__version__ ).release < version.parse("0.11.0" ).release:
# old versions of hfh don't url-encode the file path
_UpperCAmelCase : str = quote(__lowerCAmelCase )
return hfh.hf_hub_url(__lowerCAmelCase , __lowerCAmelCase , repo_type="dataset" , revision=__lowerCAmelCase )
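# Example usage (added; the repo and file names are hypothetical): resolve the
# URL of a file in a dataset repo, url-encoding the path when an old
# huggingface_hub version (< 0.11.0) is installed.
_demo_url = __lowerCAmelCase("some_user/some_dataset", "data/train-00000-of-00001.parquet")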
| 40
| 0
|
'''simple docstring'''
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaProcessor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse('1.6'):
lowerCamelCase__ = True
from torch.cuda.amp import autocast
lowerCamelCase__ = logging.getLogger(__name__)
def __lowerCAmelCase (__lowerCAmelCase=None , __lowerCAmelCase=None ):
return field(default_factory=lambda: default , metadata=__lowerCAmelCase )
@dataclass
class lowerCAmelCase__ :
lowerCAmelCase : str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
lowerCAmelCase : Optional[str] = field(
default=UpperCAmelCase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
lowerCAmelCase : Optional[bool] = field(
default=UpperCAmelCase__ , metadata={"help": "Whether to freeze the feature extractor layers of the model."} )
lowerCAmelCase : Optional[float] = field(
default=0.1 , metadata={"help": "The dropout ratio for the attention probabilities."} )
lowerCAmelCase : Optional[float] = field(
default=0.1 , metadata={"help": "The dropout ratio for activations inside the fully connected layer."} )
lowerCAmelCase : Optional[float] = field(
default=0.1 , metadata={
"help": "The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler."
} , )
lowerCAmelCase : Optional[float] = field(
default=0.1 , metadata={"help": "The dropout probability for all 1D convolutional layers in the feature extractor."} , )
lowerCAmelCase : Optional[float] = field(
default=0.05 , metadata={
"help": (
"Propability of each feature vector along the time axis to be chosen as the start of the vector"
"span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature"
"vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``."
)
} , )
lowerCAmelCase : Optional[float] = field(default=0.0 , metadata={"help": "The LayerDrop probability."} )
@dataclass
class lowerCAmelCase__ :
lowerCAmelCase : Optional[str] = field(
default=UpperCAmelCase__ , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
lowerCAmelCase : Optional[str] = field(
default="train+validation" , metadata={
"help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
} , )
lowerCAmelCase : bool = field(
default=UpperCAmelCase__ , metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
lowerCAmelCase : Optional[int] = field(
default=UpperCAmelCase__ , metadata={"help": "The number of processes to use for the preprocessing."} , )
lowerCAmelCase : Optional[int] = field(
default=UpperCAmelCase__ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
lowerCAmelCase : Optional[int] = field(
default=UpperCAmelCase__ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of validation examples to this "
"value if set."
)
} , )
lowerCAmelCase : List[str] = list_field(
default=[",", "?", ".", "!", "-", ";", ":", "\"\"", "%", "'", "\"", "�"] , metadata={"help": "A list of characters to remove from the transcripts."} , )
@dataclass
class lowerCAmelCase__ :
lowerCAmelCase : WavaVecaProcessor
lowerCAmelCase : Union[bool, str] = True
lowerCAmelCase : Optional[int] = None
lowerCAmelCase : Optional[int] = None
lowerCAmelCase : Optional[int] = None
lowerCAmelCase : Optional[int] = None
def __call__( self : List[Any] , lowerCamelCase__ : List[Dict[str, Union[List[int], torch.Tensor]]] ) ->Dict[str, torch.Tensor]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = [{"input_values": feature["input_values"]} for feature in features]
_UpperCAmelCase : List[Any] = [{"input_ids": feature["labels"]} for feature in features]
_UpperCAmelCase : List[str] = self.processor.pad(
lowerCamelCase__ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , )
_UpperCAmelCase : str = self.processor.pad(
labels=lowerCamelCase__ , padding=self.padding , max_length=self.max_length_labels , pad_to_multiple_of=self.pad_to_multiple_of_labels , return_tensors="pt" , )
# replace padding with -100 to ignore loss correctly
_UpperCAmelCase : Any = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1 ) , -1_00 )
_UpperCAmelCase : Any = labels
return batch
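# A minimal sketch (added, not part of the original file) of the label masking
# above: padded label positions (attention_mask == 0) become -100, which the
# CTC loss ignores.
_demo_labels = torch.tensor([[5, 7, 0]])
_demo_mask = torch.tensor([[1, 1, 0]])
assert _demo_labels.masked_fill(_demo_mask.ne(1), -100).tolist() == [[5, 7, -100]]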
class lowerCAmelCase__ ( UpperCAmelCase__ ):
def lowerCAmelCase__ ( self : Optional[Any] , lowerCamelCase__ : nn.Module , lowerCamelCase__ : Dict[str, Union[torch.Tensor, Any]] ) ->torch.Tensor:
'''simple docstring'''
model.train()
_UpperCAmelCase : Optional[int] = self._prepare_inputs(lowerCamelCase__ )
if self.use_amp:
with autocast():
_UpperCAmelCase : Tuple = self.compute_loss(lowerCamelCase__ , lowerCamelCase__ )
else:
_UpperCAmelCase : Union[str, Any] = self.compute_loss(lowerCamelCase__ , lowerCamelCase__ )
if self.args.n_gpu > 1:
if model.module.config.ctc_loss_reduction == "mean":
_UpperCAmelCase : Any = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
_UpperCAmelCase : Optional[Any] = loss.sum() / (inputs["labels"] >= 0).sum()
else:
raise ValueError(F"""{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']""" )
if self.args.gradient_accumulation_steps > 1:
_UpperCAmelCase : int = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(lowerCamelCase__ ).backward()
elif self.use_apex:
with amp.scale_loss(lowerCamelCase__ , self.optimizer ) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(lowerCamelCase__ )
else:
loss.backward()
return loss.detach()
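# A minimal sketch (added, not part of the original file) of the loss scaling
# above: with gradient_accumulation_steps = 4, dividing each micro-batch loss
# by 4 before backward() makes the four accumulated gradients sum to the
# gradient of the mean loss, matching a single full-batch step.
_demo_micro_losses = [0.8, 1.2, 1.0, 1.0]
assert abs(sum(l / 4 for l in _demo_micro_losses) - sum(_demo_micro_losses) / len(_demo_micro_losses)) < 1e-9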
def __lowerCAmelCase ():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_UpperCAmelCase : Optional[int] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_UpperCAmelCase : int = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_UpperCAmelCase : Dict = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
_UpperCAmelCase : List[str] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_UpperCAmelCase : List[str] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("Training/evaluation parameters %s" , __lowerCAmelCase )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets:
_UpperCAmelCase : Union[str, Any] = datasets.load_dataset(
"common_voice" , data_args.dataset_config_name , split=data_args.train_split_name )
_UpperCAmelCase : List[str] = datasets.load_dataset("common_voice" , data_args.dataset_config_name , split="test" )
# Create and save tokenizer
_UpperCAmelCase : Dict = F"""[{''.join(data_args.chars_to_ignore )}]"""
def remove_special_characters(__lowerCAmelCase ):
_UpperCAmelCase : List[str] = re.sub(__lowerCAmelCase , "" , batch["sentence"] ).lower() + " "
return batch
_UpperCAmelCase : str = train_dataset.map(__lowerCAmelCase , remove_columns=["sentence"] )
_UpperCAmelCase : List[str] = eval_dataset.map(__lowerCAmelCase , remove_columns=["sentence"] )
def extract_all_chars(__lowerCAmelCase ):
_UpperCAmelCase : str = " ".join(batch["text"] )
_UpperCAmelCase : Dict = list(set(__lowerCAmelCase ) )
return {"vocab": [vocab], "all_text": [all_text]}
_UpperCAmelCase : Tuple = train_dataset.map(
__lowerCAmelCase , batched=__lowerCAmelCase , batch_size=-1 , keep_in_memory=__lowerCAmelCase , remove_columns=train_dataset.column_names , )
_UpperCAmelCase : List[str] = eval_dataset.map(
__lowerCAmelCase , batched=__lowerCAmelCase , batch_size=-1 , keep_in_memory=__lowerCAmelCase , remove_columns=eval_dataset.column_names , )
_UpperCAmelCase : List[Any] = list(set(vocab_train["vocab"][0] ) | set(vocab_test["vocab"][0] ) )
_UpperCAmelCase : Dict = {v: k for k, v in enumerate(__lowerCAmelCase )}
_UpperCAmelCase : Dict = vocab_dict[" "]
del vocab_dict[" "]
_UpperCAmelCase : Tuple = len(__lowerCAmelCase )
_UpperCAmelCase : List[Any] = len(__lowerCAmelCase )
with open("vocab.json" , "w" ) as vocab_file:
json.dump(__lowerCAmelCase , __lowerCAmelCase )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_UpperCAmelCase : Tuple = WavaVecaCTCTokenizer(
"vocab.json" , unk_token="[UNK]" , pad_token="[PAD]" , word_delimiter_token="|" , )
_UpperCAmelCase : List[Any] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0.0 , do_normalize=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase )
_UpperCAmelCase : List[Any] = WavaVecaProcessor(feature_extractor=__lowerCAmelCase , tokenizer=__lowerCAmelCase )
_UpperCAmelCase : List[Any] = WavaVecaForCTC.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , activation_dropout=model_args.activation_dropout , attention_dropout=model_args.attention_dropout , hidden_dropout=model_args.hidden_dropout , feat_proj_dropout=model_args.feat_proj_dropout , mask_time_prob=model_args.mask_time_prob , gradient_checkpointing=training_args.gradient_checkpointing , layerdrop=model_args.layerdrop , ctc_loss_reduction="mean" , pad_token_id=processor.tokenizer.pad_token_id , vocab_size=len(processor.tokenizer ) , )
if data_args.max_train_samples is not None:
_UpperCAmelCase : int = min(len(__lowerCAmelCase ) , data_args.max_train_samples )
_UpperCAmelCase : Optional[int] = train_dataset.select(range(__lowerCAmelCase ) )
if data_args.max_val_samples is not None:
_UpperCAmelCase : List[Any] = eval_dataset.select(range(data_args.max_val_samples ) )
_UpperCAmelCase : Union[str, Any] = torchaudio.transforms.Resample(48_000 , 16_000 )
# Preprocessing the datasets.
# We need to read the audio files as arrays and tokenize the targets.
def speech_file_to_array_fn(__lowerCAmelCase ):
_UpperCAmelCase : Optional[Any] = torchaudio.load(batch["path"] )
_UpperCAmelCase : int = resampler(__lowerCAmelCase ).squeeze().numpy()
_UpperCAmelCase : str = 16_000
_UpperCAmelCase : Tuple = batch["text"]
return batch
_UpperCAmelCase : List[str] = train_dataset.map(
__lowerCAmelCase , remove_columns=train_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
_UpperCAmelCase : Tuple = eval_dataset.map(
__lowerCAmelCase , remove_columns=eval_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
def prepare_dataset(__lowerCAmelCase ):
# check that all files have the correct sampling rate
assert (
len(set(batch["sampling_rate"] ) ) == 1
), F"""Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."""
_UpperCAmelCase : Optional[Any] = processor(
audio=batch["speech"] , text=batch["target_text"] , sampling_rate=batch["sampling_rate"][0] )
batch.update(__lowerCAmelCase )
return batch
_UpperCAmelCase : int = train_dataset.map(
__lowerCAmelCase , remove_columns=train_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=__lowerCAmelCase , num_proc=data_args.preprocessing_num_workers , )
_UpperCAmelCase : Tuple = eval_dataset.map(
__lowerCAmelCase , remove_columns=eval_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=__lowerCAmelCase , num_proc=data_args.preprocessing_num_workers , )
# Metric
_UpperCAmelCase : List[Any] = datasets.load_metric("wer" )
def compute_metrics(__lowerCAmelCase ):
_UpperCAmelCase : Optional[Any] = pred.predictions
_UpperCAmelCase : Optional[int] = np.argmax(__lowerCAmelCase , axis=-1 )
_UpperCAmelCase : Any = processor.tokenizer.pad_token_id
_UpperCAmelCase : List[Any] = processor.batch_decode(__lowerCAmelCase )
# we do not want to group tokens when computing the metrics
_UpperCAmelCase : Optional[int] = processor.batch_decode(pred.label_ids , group_tokens=__lowerCAmelCase )
_UpperCAmelCase : str = wer_metric.compute(predictions=__lowerCAmelCase , references=__lowerCAmelCase )
return {"wer": wer}
if model_args.freeze_feature_extractor:
model.freeze_feature_extractor()
# Data collator
_UpperCAmelCase : str = DataCollatorCTCWithPadding(processor=__lowerCAmelCase , padding=__lowerCAmelCase )
# Initialize our Trainer
_UpperCAmelCase : Optional[int] = CTCTrainer(
model=__lowerCAmelCase , data_collator=__lowerCAmelCase , args=__lowerCAmelCase , compute_metrics=__lowerCAmelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=processor.feature_extractor , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
_UpperCAmelCase : Dict = last_checkpoint
elif os.path.isdir(model_args.model_name_or_path ):
_UpperCAmelCase : Optional[Any] = model_args.model_name_or_path
else:
_UpperCAmelCase : Dict = None
# Save the feature_extractor and the tokenizer
if is_main_process(training_args.local_rank ):
processor.save_pretrained(training_args.output_dir )
_UpperCAmelCase : Optional[Any] = trainer.train(resume_from_checkpoint=__lowerCAmelCase )
trainer.save_model()
_UpperCAmelCase : List[Any] = train_result.metrics
_UpperCAmelCase : str = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(__lowerCAmelCase )
)
_UpperCAmelCase : Tuple = min(__lowerCAmelCase , len(__lowerCAmelCase ) )
trainer.log_metrics("train" , __lowerCAmelCase )
trainer.save_metrics("train" , __lowerCAmelCase )
trainer.save_state()
# Evaluation
_UpperCAmelCase : Optional[Any] = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
_UpperCAmelCase : Tuple = trainer.evaluate()
_UpperCAmelCase : Optional[Any] = data_args.max_val_samples if data_args.max_val_samples is not None else len(__lowerCAmelCase )
_UpperCAmelCase : int = min(__lowerCAmelCase , len(__lowerCAmelCase ) )
trainer.log_metrics("eval" , __lowerCAmelCase )
trainer.save_metrics("eval" , __lowerCAmelCase )
return results
if __name__ == "__main__":
main()
| 701
|
'''simple docstring'''
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ ):
lowerCAmelCase : int = "pixel_values"
lowerCAmelCase : Dict = False
lowerCAmelCase : Union[str, Any] = TimmBackboneConfig
def __init__( self : List[str] , lowerCamelCase__ : Dict , **lowerCamelCase__ : str ) ->Dict:
'''simple docstring'''
requires_backends(self , "timm" )
super().__init__(lowerCamelCase__ )
_UpperCAmelCase : Any = config
if config.backbone is None:
raise ValueError("backbone is not set in the config. Please set it to a timm model name." )
if config.backbone not in timm.list_models():
raise ValueError(F"""backbone {config.backbone} is not supported by timm.""" )
if hasattr(lowerCamelCase__ , "out_features" ) and config.out_features is not None:
raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead." )
_UpperCAmelCase : Optional[Any] = getattr(lowerCamelCase__ , "use_pretrained_backbone" , lowerCamelCase__ )
if pretrained is None:
raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False." )
# We just take the final layer by default. This matches the default for the transformers models.
_UpperCAmelCase : int = config.out_indices if getattr(lowerCamelCase__ , "out_indices" , lowerCamelCase__ ) is not None else (-1,)
_UpperCAmelCase : List[Any] = timm.create_model(
config.backbone , pretrained=lowerCamelCase__ , features_only=config.features_only , in_chans=config.num_channels , out_indices=lowerCamelCase__ , **lowerCamelCase__ , )
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
_UpperCAmelCase : List[str] = self._backbone.return_layers
_UpperCAmelCase : Optional[int] = {layer["module"]: str(lowerCamelCase__ ) for i, layer in enumerate(self._backbone.feature_info.info )}
super()._init_backbone(lowerCamelCase__ )
@classmethod
def lowerCAmelCase__ ( cls : List[str] , lowerCamelCase__ : str , *lowerCamelCase__ : Tuple , **lowerCamelCase__ : Optional[Any] ) ->Optional[Any]:
'''simple docstring'''
requires_backends(cls , ["vision", "timm"] )
from ...models.timm_backbone import TimmBackboneConfig
_UpperCAmelCase : Any = kwargs.pop("config" , TimmBackboneConfig() )
_UpperCAmelCase : Dict = kwargs.pop("use_timm_backbone" , lowerCamelCase__ )
if not use_timm:
raise ValueError("use_timm_backbone must be True for timm backbones" )
_UpperCAmelCase : str = kwargs.pop("num_channels" , config.num_channels )
_UpperCAmelCase : Dict = kwargs.pop("features_only" , config.features_only )
_UpperCAmelCase : str = kwargs.pop("use_pretrained_backbone" , config.use_pretrained_backbone )
_UpperCAmelCase : Optional[Any] = kwargs.pop("out_indices" , config.out_indices )
_UpperCAmelCase : Dict = TimmBackboneConfig(
backbone=lowerCamelCase__ , num_channels=lowerCamelCase__ , features_only=lowerCamelCase__ , use_pretrained_backbone=lowerCamelCase__ , out_indices=lowerCamelCase__ , )
return super()._from_config(lowerCamelCase__ , **lowerCamelCase__ )
def lowerCAmelCase__ ( self : Tuple , lowerCamelCase__ : str ) ->Optional[int]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self : Union[str, Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : Union[str, Any]=None , lowerCamelCase__ : List[Any]=None , lowerCamelCase__ : Union[str, Any]=None , **lowerCamelCase__ : Dict ) ->Union[BackboneOutput, Tuple[Tensor, ...]]:
'''simple docstring'''
_UpperCAmelCase : Any = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCAmelCase : Optional[int] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_UpperCAmelCase : Dict = output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError("Cannot output attentions for timm backbones at the moment" )
if output_hidden_states:
# We modify the return layers to include all the stages of the backbone
_UpperCAmelCase : Optional[int] = self._all_layers
_UpperCAmelCase : List[str] = self._backbone(lowerCamelCase__ , **lowerCamelCase__ )
_UpperCAmelCase : List[Any] = self._return_layers
_UpperCAmelCase : Tuple = tuple(hidden_states[i] for i in self.out_indices )
else:
_UpperCAmelCase : Any = self._backbone(lowerCamelCase__ , **lowerCamelCase__ )
_UpperCAmelCase : Tuple = None
_UpperCAmelCase : Dict = tuple(lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = tuple(lowerCamelCase__ ) if hidden_states is not None else None
if not return_dict:
_UpperCAmelCase : Dict = (feature_maps,)
if output_hidden_states:
_UpperCAmelCase : List[str] = output + (hidden_states,)
return output
return BackboneOutput(feature_maps=lowerCamelCase__ , hidden_states=lowerCamelCase__ , attentions=lowerCamelCase__ )
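# Illustrative sketch (added, not part of the original file) of how out_indices
# selects feature maps from the per-stage hidden states in forward() above; the
# default (-1,) keeps only the final stage.
_demo_hidden_states = ("stage0", "stage1", "stage2", "stage3")  # stand-ins for tensors
_demo_out_indices = (-1,)
assert tuple(_demo_hidden_states[i] for i in _demo_out_indices) == ("stage3",)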
| 40
| 0
|
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class lowerCAmelCase__ :
def __init__( self : Optional[int] , lowerCamelCase__ : int , lowerCamelCase__ : Any=13 , lowerCamelCase__ : str=30 , lowerCamelCase__ : str=2 , lowerCamelCase__ : Optional[int]=3 , lowerCamelCase__ : List[Any]=True , lowerCamelCase__ : Optional[int]=True , lowerCamelCase__ : Optional[int]=32 , lowerCamelCase__ : int=5 , lowerCamelCase__ : Optional[int]=4 , lowerCamelCase__ : Any=37 , lowerCamelCase__ : int="gelu" , lowerCamelCase__ : Optional[int]=0.1 , lowerCamelCase__ : Optional[int]=0.1 , lowerCamelCase__ : Tuple=10 , lowerCamelCase__ : Any=0.0_2 , lowerCamelCase__ : Optional[Any]=3 , lowerCamelCase__ : Dict=None , lowerCamelCase__ : Dict=2 , ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : Tuple = parent
_UpperCAmelCase : Tuple = batch_size
_UpperCAmelCase : List[str] = image_size
_UpperCAmelCase : List[str] = patch_size
_UpperCAmelCase : Dict = num_channels
_UpperCAmelCase : Dict = is_training
_UpperCAmelCase : List[Any] = use_labels
_UpperCAmelCase : List[Any] = hidden_size
_UpperCAmelCase : Union[str, Any] = num_hidden_layers
_UpperCAmelCase : Any = num_attention_heads
_UpperCAmelCase : str = intermediate_size
_UpperCAmelCase : Union[str, Any] = hidden_act
_UpperCAmelCase : List[Any] = hidden_dropout_prob
_UpperCAmelCase : Tuple = attention_probs_dropout_prob
_UpperCAmelCase : Any = type_sequence_label_size
_UpperCAmelCase : Optional[int] = initializer_range
_UpperCAmelCase : Tuple = scope
_UpperCAmelCase : List[Any] = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
_UpperCAmelCase : Optional[int] = (image_size // patch_size) ** 2
_UpperCAmelCase : Tuple = num_patches + 2
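# Worked example (added): with the defaults above (image_size=30, patch_size=2),
# num_patches = (30 // 2) ** 2 = 225 and seq_length = 225 + 2 = 227,
# the extra two positions being the [CLS] and distillation tokens.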
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Any:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase : Any = None
if self.use_labels:
_UpperCAmelCase : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase : str = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase__ ( self : Any ) ->Dict:
'''simple docstring'''
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def lowerCAmelCase__ ( self : Dict , lowerCamelCase__ : str , lowerCamelCase__ : List[str] , lowerCamelCase__ : List[str] ) ->str:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = DeiTModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_UpperCAmelCase : List[str] = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase__ ( self : int , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : Union[str, Any] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : int = DeiTForMaskedImageModeling(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_UpperCAmelCase : Dict = model(lowerCamelCase__ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
_UpperCAmelCase : int = 1
_UpperCAmelCase : Optional[int] = DeiTForMaskedImageModeling(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_UpperCAmelCase : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_UpperCAmelCase : Any = model(lowerCamelCase__ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : str , lowerCamelCase__ : Any , lowerCamelCase__ : int ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase : Dict = self.type_sequence_label_size
_UpperCAmelCase : Optional[Any] = DeiTForImageClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_UpperCAmelCase : Any = model(lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_UpperCAmelCase : List[str] = 1
_UpperCAmelCase : Tuple = DeiTForImageClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_UpperCAmelCase : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_UpperCAmelCase : List[str] = model(lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Optional[int] = config_and_inputs
_UpperCAmelCase : List[Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
lowerCAmelCase : Tuple = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
lowerCAmelCase : Optional[Any] = (
{
"feature-extraction": DeiTModel,
"image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
lowerCAmelCase : Tuple = False
lowerCAmelCase : Optional[Any] = False
lowerCAmelCase : Optional[Any] = False
def lowerCAmelCase__ ( self : Optional[int] ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = DeiTModelTester(self )
_UpperCAmelCase : Tuple = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ , hidden_size=37 )
def lowerCAmelCase__ ( self : List[Any] ) ->Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="DeiT does not use inputs_embeds" )
def lowerCAmelCase__ ( self : int ) ->List[str]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self : str ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : Optional[Any] = model_class(lowerCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_UpperCAmelCase : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase__ , nn.Linear ) )
def lowerCAmelCase__ ( self : int ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : str = model_class(lowerCamelCase__ )
_UpperCAmelCase : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase : Tuple = [*signature.parameters.keys()]
_UpperCAmelCase : Tuple = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
def lowerCAmelCase__ ( self : Dict ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def lowerCAmelCase__ ( self : List[str] ) ->Any:
'''simple docstring'''
_UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase__ )
def lowerCAmelCase__ ( self : int ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase__ )
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : List[str]=False ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : int = super()._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ , return_labels=lowerCamelCase__ )
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def lowerCAmelCase__ ( self : Tuple ) ->str:
'''simple docstring'''
if not self.model_tester.is_training:
return
_UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase : Union[str, Any] = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher only supports inference
if (
model_class in get_values(lowerCamelCase__ )
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
_UpperCAmelCase : List[str] = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.train()
_UpperCAmelCase : List[str] = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ , return_labels=lowerCamelCase__ )
_UpperCAmelCase : Any = model(**lowerCamelCase__ ).loss
loss.backward()
def lowerCAmelCase__ ( self : Dict ) ->str:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
_UpperCAmelCase : int = False
_UpperCAmelCase : Tuple = True
for model_class in self.all_model_classes:
if model_class in get_values(lowerCamelCase__ ) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher only supports inference
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
_UpperCAmelCase : str = model_class(lowerCamelCase__ )
model.gradient_checkpointing_enable()
model.to(lowerCamelCase__ )
model.train()
_UpperCAmelCase : str = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ , return_labels=lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = model(**lowerCamelCase__ ).loss
loss.backward()
def lowerCAmelCase__ ( self : List[str] ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase : Union[str, Any] = [
{"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
{"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
{"title": "regression", "num_labels": 1, "dtype": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(lowerCamelCase__ ),
*get_values(lowerCamelCase__ ),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F"""Testing {model_class} with {problem_type['title']}""" ):
_UpperCAmelCase : Any = problem_type["title"]
_UpperCAmelCase : Dict = problem_type["num_labels"]
_UpperCAmelCase : str = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.train()
_UpperCAmelCase : Tuple = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ , return_labels=lowerCamelCase__ )
if problem_type["num_labels"] > 1:
_UpperCAmelCase : List[Any] = inputs["labels"].unsqueeze(1 ).repeat(1 , problem_type["num_labels"] )
_UpperCAmelCase : Union[str, Any] = inputs["labels"].to(problem_type["dtype"] )
# This tests that we do not trigger the warning from PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom that something is wrong with the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=lowerCamelCase__ ) as warning_list:
_UpperCAmelCase : Union[str, Any] = model(**lowerCamelCase__ ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F"""Something is going wrong in the regression problem: intercepted {w.message}""" )
loss.backward()
@slow
def lowerCAmelCase__ ( self : str ) ->Optional[int]:
'''simple docstring'''
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase : int = DeiTModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def __lowerCAmelCase ():
_UpperCAmelCase : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
@cached_property
def lowerCAmelCase__ ( self : Any ) ->str:
'''simple docstring'''
return (
DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" )
if is_vision_available()
else None
)
@slow
def lowerCAmelCase__ ( self : str ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" ).to(
lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = self.default_image_processor
_UpperCAmelCase : Optional[Any] = prepare_img()
_UpperCAmelCase : Dict = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
_UpperCAmelCase : List[Any] = model(**lowerCamelCase__ )
# verify the logits
_UpperCAmelCase : int = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , lowerCamelCase__ )
_UpperCAmelCase : Any = torch.tensor([-1.0_2_6_6, 0.1_9_1_2, -1.2_8_6_1] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase__ , atol=1E-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def lowerCAmelCase__ ( self : Dict ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : Dict = DeiTModel.from_pretrained(
"facebook/deit-base-distilled-patch16-224" , torch_dtype=torch.floataa , device_map="auto" )
_UpperCAmelCase : Optional[int] = self.default_image_processor
_UpperCAmelCase : int = prepare_img()
_UpperCAmelCase : Dict = image_processor(images=lowerCamelCase__ , return_tensors="pt" )
_UpperCAmelCase : Any = inputs.pixel_values.to(lowerCamelCase__ )
# forward pass to make sure inference works in fp16
with torch.no_grad():
_UpperCAmelCase : Optional[int] = model(lowerCamelCase__ )
| 702
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCamelCase__ = {'configuration_mra': ['MRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MraConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
'MRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MraForMaskedLM',
'MraForMultipleChoice',
'MraForQuestionAnswering',
'MraForSequenceClassification',
'MraForTokenClassification',
'MraLayer',
'MraModel',
'MraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 40
| 0
|
'''simple docstring'''
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None ):
if version.parse(hfh.__version__ ).release < version.parse("0.11.0" ).release:
# old versions of hfh don't url-encode the file path
_UpperCAmelCase : str = quote(__lowerCAmelCase )
return hfh.hf_hub_url(__lowerCAmelCase , __lowerCAmelCase , repo_type="dataset" , revision=__lowerCAmelCase )
| 703
|
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class lowerCAmelCase__ :
def __init__( self : Union[str, Any] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : List[Any]=2 , lowerCamelCase__ : List[Any]=True , lowerCamelCase__ : Union[str, Any]=False , lowerCamelCase__ : List[Any]=10 , lowerCamelCase__ : Optional[Any]=3 , lowerCamelCase__ : Tuple=32 * 8 , lowerCamelCase__ : int=32 * 8 , lowerCamelCase__ : Dict=4 , lowerCamelCase__ : Any=64 , ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = parent
_UpperCAmelCase : Tuple = batch_size
_UpperCAmelCase : Dict = is_training
_UpperCAmelCase : Optional[Any] = use_auxiliary_loss
_UpperCAmelCase : Dict = num_queries
_UpperCAmelCase : Dict = num_channels
_UpperCAmelCase : Union[str, Any] = min_size
_UpperCAmelCase : Optional[int] = max_size
_UpperCAmelCase : str = num_labels
_UpperCAmelCase : Optional[int] = hidden_dim
_UpperCAmelCase : Any = hidden_dim
def lowerCAmelCase__ ( self : str ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=lowerCamelCase__ ) > 0.5
).float()
_UpperCAmelCase : int = (torch.rand((self.batch_size, self.num_labels) , device=lowerCamelCase__ ) > 0.5).long()
_UpperCAmelCase : Dict = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def lowerCAmelCase__ ( self : List[str] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Dict = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
_UpperCAmelCase : List[str] = self.num_queries
_UpperCAmelCase : Any = self.num_labels
_UpperCAmelCase : Union[str, Any] = [1, 1, 1, 1]
_UpperCAmelCase : Any = self.num_channels
_UpperCAmelCase : int = 64
_UpperCAmelCase : int = 1_28
_UpperCAmelCase : int = self.hidden_dim
_UpperCAmelCase : List[Any] = self.hidden_dim
_UpperCAmelCase : Any = self.hidden_dim
return config
    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict
    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states
        self.parent.assertEqual(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertEqual(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertEqual(len(transformer_decoder_hidden_states), config.decoder_layers)
    def create_and_check_maskaformer_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = MaskaFormerModel(config=config)
            model.to(torch_device)
            model.eval()
            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.hidden_dim),
        )
        # let's ensure the other two hidden states exist
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)
        if output_hidden_states:
            self.check_output_hidden_state(output, config)
    def create_and_check_maskaformer_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = MaskaFormerForUniversalSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)

            comm_check_on_output(result)

            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
            )

            comm_check_on_output(result)

            self.parent.assertTrue(result.loss is not None)
            self.parent.assertEqual(result.loss.shape, torch.Size([1]))
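# Illustrative only (not part of the original test suite): with the default
# tester settings above (batch_size=2, num_queries=10, min_size=max_size=32 * 8,
# num_labels=4), the shape assertions reduce to these concrete values:
#
#     masks_queries_logits.shape == (2, 10, 256 // 4, 256 // 4) == (2, 10, 64, 64)
#     class_queries_logits.shape == (2, 10, 4 + 1)  # + 1 for the null class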
@require_torch
class MaskaFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": MaskaFormerModel} if is_torch_available() else {}
    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False

    def setUp(self):
        self.model_tester = MaskaFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskaFormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_maskaformer_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(config, **inputs_dict, output_hidden_states=False)

    def test_maskaformer_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*config_and_inputs)
@unittest.skip(reason="Mask2Former does not use inputs_embeds" )
def lowerCAmelCase__ ( self : Tuple ) ->List[str]:
'''simple docstring'''
pass
@unittest.skip(reason="Mask2Former does not have a get_input_embeddings method" )
def lowerCAmelCase__ ( self : str ) ->List[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="Mask2Former is not a generative model" )
def lowerCAmelCase__ ( self : Optional[Any] ) ->Optional[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="Mask2Former does not use token embeddings" )
def lowerCAmelCase__ ( self : Optional[int] ) ->Any:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def lowerCAmelCase__ ( self : Dict ) ->str:
'''simple docstring'''
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->str:
'''simple docstring'''
pass
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
            model = MaskaFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }
        config = self.model_tester.get_config()
        model = MaskaFormerForUniversalSegmentation(config).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(config, **inputs_dict, output_hidden_states=True)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs_dict, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)
    def test_training(self):
        if not self.model_tester.is_training:
            return
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        model = model_class(config)
        model.to(torch_device)
        model.train()
        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()
    def test_retain_grad_hidden_states_attentions(self):
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True
        model = model_class(config).to(torch_device)
        model.train()
        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)
        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()
        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()
        attentions = outputs.attentions[0]
        attentions.retain_grad()
        outputs.loss.backward(retain_graph=True)
        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)
TOLERANCE = 1e-4


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@slow
class MaskaFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def model_checkpoints(self):
        return "facebook/mask2former-swin-small-coco-instance"

    @cached_property
    def default_image_processor(self):
        return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None
    def test_inference_no_head(self):
        model = MaskaFormerModel.from_pretrained(self.model_checkpoints).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice_hidden_state = torch.tensor(
            [[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )
    def test_inference_universal_segmentation_head(self):
        model = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape, (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4)
        )
        expected_slice = [
            [-8.7839, -9.0056, -8.8121],
            [-7.4104, -7.0313, -6.5401],
            [-6.6105, -6.3427, -6.4675],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))
        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(class_queries_logits.shape, (1, model.config.num_queries, model.config.num_labels + 1))
        expected_slice = torch.tensor(
            [
                [1.8324, -8.0835, -4.1922],
                [0.8450, -9.0050, -3.6053],
                [0.3045, -7.7293, -3.0275],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))
    def test_with_segmentation_maps_and_loss(self):
        model = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor

        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
            return_tensors="pt",
        )
        inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
        inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
        inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]

        with torch.no_grad():
            outputs = model(**inputs)

        self.assertTrue(outputs.loss is not None)
'''simple docstring'''
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def __lowerCAmelCase (__lowerCAmelCase="" ) -> Any:
_UpperCAmelCase : Union[str, Any] = tempfile.mkdtemp()
return os.path.join(__lowerCAmelCase , str(uuid.uuida() ) + suffix )
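# Illustrative only: get_new_path(".wav") returns a path of the form
# "/tmp/tmpXXXXXXXX/<random-uuid>.wav" -- a fresh temp directory plus a
# collision-free random file name.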
@require_soundfile
@require_torch
class AgentAudioTests(unittest.TestCase):
    def test_from_tensor(self):
        tensor = torch.rand(12, dtype=torch.float32) - 0.5
        agent_type = AgentAudio(tensor)
        path = str(agent_type.to_string())

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))

        del agent_type

        # Ensure the path remains even after the object deletion
        self.assertTrue(os.path.exists(path))

        # Ensure that the file contains the same value as the original tensor
        new_tensor, _ = sf.read(path)
        self.assertTrue(torch.allclose(tensor, torch.tensor(new_tensor), atol=1e-4))
    def test_from_string(self):
        tensor = torch.rand(12, dtype=torch.float32) - 0.5
        path = get_new_path(suffix=".wav")
        sf.write(path, tensor, 16000)
        agent_type = AgentAudio(path)
        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))
        self.assertEqual(agent_type.to_string(), path)
@require_vision
@require_torch
class AgentImageTests(unittest.TestCase):
    def test_from_tensor(self):
        tensor = torch.randint(0, 256, (64, 64, 3))
        agent_type = AgentImage(tensor)
        path = str(agent_type.to_string())

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type._tensor, atol=1e-4))
        self.assertIsInstance(agent_type.to_raw(), Image.Image)

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))
    def test_from_string(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(path)
        self.assertTrue(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))
    def test_from_image(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(image)
        self.assertFalse(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))
class AgentTextTests(unittest.TestCase):
    def test_from_string(self):
        string = "Hey!"
        agent_type = AgentText(string)
        self.assertEqual(string, agent_type.to_string())
        self.assertEqual(string, agent_type.to_raw())
        self.assertEqual(string, agent_type)
'''simple docstring'''
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
lowerCamelCase__ = 16
lowerCamelCase__ = 32
def bamb(x):
    # bytes -> megabytes
    return int(x / 2**20)
class TorchTracemalloc:
    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = bamb(self.end - self.begin)
        self.peaked = bamb(self.peak - self.begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def get_dataloaders(accelerator, batch_size=16, model_name_or_path="bert-base-cased", n_train=320, n_val=160):
    tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
    datasets = load_dataset(
        "glue", "mrpc", split={"train": f"train[:{n_train}]", "validation": f"validation[:{n_val}]"}
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )
    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name_or_path = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(
        accelerator, batch_size, model_name_or_path, args.n_train, args.n_val
    )

    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch, num_epochs):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader):
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                overall_step += 1

        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print("Memory before entering the train : {}".format(bamb(tracemalloc.begin)))
        accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used))
        accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked))
        accelerator.print(
            "Total Peak Memory consumed during the train (max): {}".format(
                tracemalloc.peaked + bamb(tracemalloc.begin)
            )
        )
        train_total_peak_memory[f"epoch-{epoch}"] = tracemalloc.peaked + bamb(tracemalloc.begin)
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "peak_memory_utilization.json"), "w") as f:
            json.dump(train_total_peak_memory, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--peak_memory_upper_bound",
        type=float,
        default=None,
        help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.",
    )
    parser.add_argument("--n_train", type=int, default=320, help="Number of training examples to use.")
    parser.add_argument("--n_val", type=int, default=160, help="Number of validation examples to use.")
    parser.add_argument("--num_epochs", type=int, default=1, help="Number of train epochs.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
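# Illustrative launch command (hypothetical script name and values):
#
#     accelerate launch peak_memory_tracking.py --num_epochs 1 --peak_memory_upper_bound 4000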
'''simple docstring'''
__all__ = [
'Audio',
'Array2D',
'Array3D',
'Array4D',
'Array5D',
'ClassLabel',
'Features',
'Sequence',
'Value',
'Image',
'Translation',
'TranslationVariableLanguages',
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
'''simple docstring'''
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
'169M': 12,
'430M': 24,
'1B5': 24,
'3B': 32,
'7B': 32,
'14B': 40,
}
HIDEN_SIZE_MAPPING = {
'169M': 768,
'430M': 1_024,
'1B5': 2_048,
'3B': 2_560,
'7B': 4_096,
'14B': 5_120,
}
def convert_state_dict(state_dict):
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_receptance
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")
        if name != "head.weight":
            name = "rwkv." + name
        state_dict[name] = weight
    return state_dict
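# Illustrative only: a few hypothetical checkpoint keys and the names the
# renaming rules above produce.
#
#     "emb.weight"              -> "rwkv.embeddings.weight"
#     "blocks.2.att.key.weight" -> "rwkv.blocks.2.attention.key.weight"
#     "blocks.3.ffn.time_mix_k" -> "rwkv.blocks.3.feed_forward.time_mix_key"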
def convert_rmkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer.")
        vocab_size = 50_277
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)

    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
    if size not in possible_sizes:
        raise ValueError(f"`size` should be one of {possible_sizes}, got {size}.")

    config = RwkvConfig(
        vocab_size=vocab_size,
        num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size],
        hidden_size=HIDEN_SIZE_MAPPING[size],
    )
    config.save_pretrained(output_dir)

    # 3. Download the model file, then convert the state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="cpu")
    state_dict = convert_state_dict(state_dict)

    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))

    if index is not None:
        save_index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(save_index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)

    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        "Cleaning up shards. This may error with an OOM error, if this is the case don't worry you still have converted the model."
    )
    shard_files = list(shards.keys())

    del state_dict
    del shards
    gc.collect()

    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))

    del state_dict
    gc.collect()

    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub.")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="2GB")
        tokenizer.push_to_hub(model_name)
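# Illustrative CLI invocation (hypothetical repo and file names):
#
#     python convert_rwkv_checkpoint_to_hf.py --repo_id BlinkDL/rwkv-4-pile-169m \
#         --checkpoint_file RWKV-4-Pile-169M-20220807-8023.pth --output_dir ./rwkv-hf --size 169M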
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--repo_id', default=None, type=str, required=True, help='Repo ID from which to pull the checkpoint.'
)
parser.add_argument(
'--checkpoint_file', default=None, type=str, required=True, help='Name of the checkpoint file in the repo.'
)
parser.add_argument(
'--output_dir', default=None, type=str, required=True, help='Where to save the converted model.'
)
parser.add_argument(
'--tokenizer_file',
default=None,
type=str,
help='Path to the tokenizer file to use (if not provided, only the model is converted).',
)
parser.add_argument(
'--size',
default=None,
type=str,
help='Size of the model. Will be inferred from the `checkpoint_file` if not passed.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Push to the Hub the converted model.',
)
parser.add_argument(
'--model_name',
default=None,
type=str,
help='Name of the pushed model on the Hub, including the username / organization.',
)
    args = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
'''simple docstring'''
from __future__ import annotations
import math
def minimax(depth, node_index, is_max, scores, height):
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )
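# Illustrative only: for scores [3, 5, 2, 9] and height = 2, the maximizing root
# picks max(min(3, 5), min(2, 9)) = max(3, 2) = 3, so
# minimax(0, 0, True, [3, 5, 2, 9], 2) == 3.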
def main():
    scores = [90, 23, 6, 33, 21, 65, 123, 34_423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
'''simple docstring'''
from __future__ import annotations
import numpy as np
def relu(vector):
    return np.maximum(0, vector)
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
'''simple docstring'''
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def set_seed(seed: int):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # ^^ safe to call this function even if cuda is not available
class EMAModel:
    def __init__(
        self,
        parameters: Iterable[torch.nn.Parameter],
        decay: float = 0.9999,
        min_decay: float = 0.0,
        update_after_step: int = 0,
        use_ema_warmup: bool = False,
        inv_gamma: Union[float, int] = 1.0,
        power: Union[float, int] = 2 / 3,
        model_cls: Optional[Any] = None,
        model_config: Dict[str, Any] = None,
        **kwargs,
    ):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()
            # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
            use_ema_warmup = True

        if kwargs.get("max_value", None) is not None:
            deprecation_message = "The `max_value` argument is deprecated. Please use `decay` instead."
            deprecate("max_value", "1.0.0", deprecation_message, standard_warn=False)
            decay = kwargs["max_value"]

        if kwargs.get("min_value", None) is not None:
            deprecation_message = "The `min_value` argument is deprecated. Please use `min_decay` instead."
            deprecate("min_value", "1.0.0", deprecation_message, standard_warn=False)
            min_decay = kwargs["min_value"]

        parameters = list(parameters)
        self.shadow_params = [p.clone().detach() for p in parameters]

        if kwargs.get("device", None) is not None:
            deprecation_message = "The `device` argument is deprecated. Please use `to` instead."
            deprecate("device", "1.0.0", deprecation_message, standard_warn=False)
            self.to(device=kwargs["device"])

        self.temp_stored_params = None

        self.decay = decay
        self.min_decay = min_decay
        self.update_after_step = update_after_step
        self.use_ema_warmup = use_ema_warmup
        self.inv_gamma = inv_gamma
        self.power = power
        self.optimization_step = 0
        self.cur_decay_value = None  # set in `step()`
        self.model_cls = model_cls
        self.model_config = model_config
    @classmethod
    def from_pretrained(cls, path, model_cls) -> "EMAModel":
        _, ema_kwargs = model_cls.load_config(path, return_unused_kwargs=True)
        model = model_cls.from_pretrained(path)
        ema_model = cls(model.parameters(), model_cls=model_cls, model_config=model.config)
        ema_model.load_state_dict(ema_kwargs)
        return ema_model
    def save_pretrained(self, path):
        if self.model_cls is None:
            raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__.")
        if self.model_config is None:
            raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__.")
        model = self.model_cls.from_config(self.model_config)
        state_dict = self.state_dict()
        state_dict.pop("shadow_params", None)
        model.register_to_config(**state_dict)
        self.copy_to(model.parameters())
        model.save_pretrained(path)
    def get_decay(self, optimization_step: int) -> float:
        step = max(0, optimization_step - self.update_after_step - 1)
        if step <= 0:
            return 0.0
        if self.use_ema_warmup:
            cur_decay_value = 1 - (1 + step / self.inv_gamma) ** -self.power
        else:
            cur_decay_value = (1 + step) / (10 + step)
        cur_decay_value = min(cur_decay_value, self.decay)
        # make sure decay is not smaller than min_decay
        cur_decay_value = max(cur_decay_value, self.min_decay)
        return cur_decay_value
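    # Illustrative only: with the defaults above (inv_gamma=1.0, power=2/3), the
    # warmup branch 1 - (1 + step / inv_gamma) ** -power evaluates roughly to
    #     step=1   -> 0.37
    #     step=10  -> 0.80
    #     step=100 -> 0.95
    # before being clamped into [min_decay, decay].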
    @torch.no_grad()
    def step(self, parameters: Iterable[torch.nn.Parameter]):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage.step`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()

        parameters = list(parameters)

        self.optimization_step += 1

        # Compute the decay factor for the exponential moving average.
        decay = self.get_decay(self.optimization_step)
        self.cur_decay_value = decay
        one_minus_decay = 1 - decay

        context_manager = contextlib.nullcontext
        if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
            import deepspeed

        for s_param, param in zip(self.shadow_params, parameters):
            if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
                context_manager = deepspeed.zero.GatheredParameters(param, modifier_rank=None)

            with context_manager():
                if param.requires_grad:
                    s_param.sub_(one_minus_decay * (s_param - param))
                else:
                    s_param.copy_(param)
    def copy_to(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        parameters = list(parameters)
        for s_param, param in zip(self.shadow_params, parameters):
            param.data.copy_(s_param.to(param.device).data)
    def to(self, device=None, dtype=None) -> None:
        self.shadow_params = [
            p.to(device=device, dtype=dtype) if p.is_floating_point() else p.to(device=device)
            for p in self.shadow_params
        ]
    def state_dict(self) -> dict:
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
    def store(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        self.temp_stored_params = [param.detach().cpu().clone() for param in parameters]
    def restore(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        if self.temp_stored_params is None:
            raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights to `restore()`")
        for c_param, param in zip(self.temp_stored_params, parameters):
            param.data.copy_(c_param.data)
        # Better memory-wise.
        self.temp_stored_params = None
    def load_state_dict(self, state_dict: dict) -> None:
        state_dict = copy.deepcopy(state_dict)

        self.decay = state_dict.get("decay", self.decay)
        if self.decay < 0.0 or self.decay > 1.0:
            raise ValueError("Decay must be between 0 and 1")

        self.min_decay = state_dict.get("min_decay", self.min_decay)
        if not isinstance(self.min_decay, float):
            raise ValueError("Invalid min_decay")

        self.optimization_step = state_dict.get("optimization_step", self.optimization_step)
        if not isinstance(self.optimization_step, int):
            raise ValueError("Invalid optimization_step")

        self.update_after_step = state_dict.get("update_after_step", self.update_after_step)
        if not isinstance(self.update_after_step, int):
            raise ValueError("Invalid update_after_step")

        self.use_ema_warmup = state_dict.get("use_ema_warmup", self.use_ema_warmup)
        if not isinstance(self.use_ema_warmup, bool):
            raise ValueError("Invalid use_ema_warmup")

        self.inv_gamma = state_dict.get("inv_gamma", self.inv_gamma)
        if not isinstance(self.inv_gamma, (float, int)):
            raise ValueError("Invalid inv_gamma")

        self.power = state_dict.get("power", self.power)
        if not isinstance(self.power, (float, int)):
            raise ValueError("Invalid power")

        shadow_params = state_dict.get("shadow_params", None)
        if shadow_params is not None:
            self.shadow_params = shadow_params
            if not isinstance(self.shadow_params, list):
                raise ValueError("shadow_params must be a list")
            if not all(isinstance(p, torch.Tensor) for p in self.shadow_params):
                raise ValueError("shadow_params must all be Tensors")
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    'configuration_albert': ['ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'AlbertConfig', 'AlbertOnnxConfig'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_albert'] = ['AlbertTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_albert_fast'] = ['AlbertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_albert'] = [
'ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'AlbertForMaskedLM',
'AlbertForMultipleChoice',
'AlbertForPreTraining',
'AlbertForQuestionAnswering',
'AlbertForSequenceClassification',
'AlbertForTokenClassification',
'AlbertModel',
'AlbertPreTrainedModel',
'load_tf_weights_in_albert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_albert'] = [
'TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFAlbertForMaskedLM',
'TFAlbertForMultipleChoice',
'TFAlbertForPreTraining',
'TFAlbertForQuestionAnswering',
'TFAlbertForSequenceClassification',
'TFAlbertForTokenClassification',
'TFAlbertMainLayer',
'TFAlbertModel',
'TFAlbertPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_albert'] = [
'FlaxAlbertForMaskedLM',
'FlaxAlbertForMultipleChoice',
'FlaxAlbertForPreTraining',
'FlaxAlbertForQuestionAnswering',
'FlaxAlbertForSequenceClassification',
'FlaxAlbertForTokenClassification',
'FlaxAlbertModel',
'FlaxAlbertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 708
|
'''simple docstring'''
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser(
description=(
        'Extract some layers of the full BertForMaskedLM or RobertaForMaskedLM for Transfer Learned'
' Distillation'
)
)
parser.add_argument('--model_type', default='bert', choices=['bert'])
parser.add_argument('--model_name', default='bert-base-uncased', type=str)
parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str)
parser.add_argument('--vocab_transform', action='store_true')
lowerCamelCase__ = parser.parse_args()
if args.model_type == "bert":
lowerCamelCase__ = BertForMaskedLM.from_pretrained(args.model_name)
lowerCamelCase__ = 'bert'
else:
raise ValueError('args.model_type should be "bert".')
lowerCamelCase__ = model.state_dict()
lowerCamelCase__ = {}
for w in ["word_embeddings", "position_embeddings"]:
lowerCamelCase__ = state_dict[F'''{prefix}.embeddings.{w}.weight''']
for w in ["weight", "bias"]:
lowerCamelCase__ = state_dict[F'''{prefix}.embeddings.LayerNorm.{w}''']
lowerCamelCase__ = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
for w in ["weight", "bias"]:
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}'''
]
std_idx += 1
lowerCamelCase__ = state_dict['cls.predictions.decoder.weight']
lowerCamelCase__ = state_dict['cls.predictions.bias']
if args.vocab_transform:
for w in ["weight", "bias"]:
lowerCamelCase__ = state_dict[F'''cls.predictions.transform.dense.{w}''']
lowerCamelCase__ = state_dict[F'''cls.predictions.transform.LayerNorm.{w}''']
print(F'''N layers selected for distillation: {std_idx}''')
print(F'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
print(F'''Save transferred checkpoint to {args.dump_checkpoint}.''')
torch.save(compressed_sd, args.dump_checkpoint)
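# Hedged illustration of the layer selection above: teacher layers [0, 2, 4, 7, 9, 11]
# are renumbered to student layers 0..5. This sketch only prints the key mapping for
# one representative weight; it is not part of the original extraction script.
teacher_layers = [0, 2, 4, 7, 9, 11]
for std_layer, teacher_layer in enumerate(teacher_layers):
    old_key = f"bert.encoder.layer.{teacher_layer}.attention.self.query.weight"
    new_key = f"bert.encoder.layer.{std_layer}.attention.self.query.weight"
    print(f"{old_key} -> {new_key}")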
| 40
| 0
|
'''simple docstring'''
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
lowerCamelCase__ = 'platform'
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , ):
if attention_mask is None:
_UpperCAmelCase : Optional[Any] = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
_UpperCAmelCase : Union[str, Any] = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
_UpperCAmelCase : List[Any] = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_UpperCAmelCase : Union[str, Any] = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
_UpperCAmelCase : str = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
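# Standalone sketch of the mask construction performed above: positions holding the
# pad token get mask 0, everything else gets 1. Values here are made up for the demo.
import numpy as np

pad_token_id = 1
demo_input_ids = np.array([[71, 82, 18, 2, 1, 1]])
demo_attention_mask = np.where(demo_input_ids != pad_token_id, 1, 0)
print(demo_attention_mask)  # [[1 1 1 1 0 0]]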
class lowerCAmelCase__ :
def __init__( self : Optional[int] , lowerCamelCase__ : Dict , lowerCamelCase__ : List[Any]=13 , lowerCamelCase__ : int=7 , lowerCamelCase__ : str=True , lowerCamelCase__ : Union[str, Any]=False , lowerCamelCase__ : Optional[Any]=99 , lowerCamelCase__ : str=16 , lowerCamelCase__ : str=2 , lowerCamelCase__ : Tuple=4 , lowerCamelCase__ : str=4 , lowerCamelCase__ : Optional[Any]="gelu" , lowerCamelCase__ : List[str]=0.1 , lowerCamelCase__ : Any=0.1 , lowerCamelCase__ : List[Any]=32 , lowerCamelCase__ : str=2 , lowerCamelCase__ : str=1 , lowerCamelCase__ : List[str]=0 , lowerCamelCase__ : Optional[Any]=0.0_2 , ) ->int:
'''simple docstring'''
_UpperCAmelCase : int = parent
_UpperCAmelCase : List[str] = batch_size
_UpperCAmelCase : int = seq_length
_UpperCAmelCase : List[str] = is_training
_UpperCAmelCase : List[str] = use_labels
_UpperCAmelCase : str = vocab_size
_UpperCAmelCase : Tuple = hidden_size
_UpperCAmelCase : Tuple = num_hidden_layers
_UpperCAmelCase : List[Any] = num_attention_heads
_UpperCAmelCase : Optional[int] = intermediate_size
_UpperCAmelCase : Any = hidden_act
_UpperCAmelCase : Optional[Any] = hidden_dropout_prob
_UpperCAmelCase : Tuple = attention_probs_dropout_prob
_UpperCAmelCase : Optional[int] = max_position_embeddings
_UpperCAmelCase : Optional[Any] = eos_token_id
_UpperCAmelCase : Optional[Any] = pad_token_id
_UpperCAmelCase : str = bos_token_id
_UpperCAmelCase : Dict = initializer_range
def lowerCAmelCase__ ( self : str ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
_UpperCAmelCase : str = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
_UpperCAmelCase : List[str] = shift_tokens_right(lowerCamelCase__ , 1 , 2 )
_UpperCAmelCase : Tuple = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=lowerCamelCase__ , )
_UpperCAmelCase : Optional[Any] = prepare_blenderbot_inputs_dict(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
return config, inputs_dict
def lowerCAmelCase__ ( self : List[str] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : Any = self.prepare_config_and_inputs()
return config, inputs_dict
def lowerCAmelCase__ ( self : Union[str, Any] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Dict , lowerCamelCase__ : Union[str, Any] ) ->int:
'''simple docstring'''
_UpperCAmelCase : List[str] = 20
_UpperCAmelCase : Optional[int] = model_class_name(lowerCamelCase__ )
_UpperCAmelCase : int = model.encode(inputs_dict["input_ids"] )
_UpperCAmelCase : Union[str, Any] = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
_UpperCAmelCase : Optional[int] = model.init_cache(decoder_input_ids.shape[0] , lowerCamelCase__ , lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="i4" )
_UpperCAmelCase : Any = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_UpperCAmelCase : int = model.decode(
decoder_input_ids[:, :-1] , lowerCamelCase__ , decoder_attention_mask=lowerCamelCase__ , past_key_values=lowerCamelCase__ , decoder_position_ids=lowerCamelCase__ , )
_UpperCAmelCase : Any = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
_UpperCAmelCase : List[Any] = model.decode(
decoder_input_ids[:, -1:] , lowerCamelCase__ , decoder_attention_mask=lowerCamelCase__ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=lowerCamelCase__ , )
_UpperCAmelCase : Any = model.decode(lowerCamelCase__ , lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" )
def lowerCAmelCase__ ( self : Dict , lowerCamelCase__ : str , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Optional[Any] ) ->int:
'''simple docstring'''
_UpperCAmelCase : Any = 20
_UpperCAmelCase : List[Any] = model_class_name(lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = model.encode(inputs_dict["input_ids"] )
_UpperCAmelCase : Optional[int] = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
_UpperCAmelCase : Dict = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
_UpperCAmelCase : Tuple = model.init_cache(decoder_input_ids.shape[0] , lowerCamelCase__ , lowerCamelCase__ )
_UpperCAmelCase : List[str] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_UpperCAmelCase : List[str] = model.decode(
decoder_input_ids[:, :-1] , lowerCamelCase__ , decoder_attention_mask=lowerCamelCase__ , past_key_values=lowerCamelCase__ , decoder_position_ids=lowerCamelCase__ , )
_UpperCAmelCase : str = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
_UpperCAmelCase : Optional[int] = model.decode(
decoder_input_ids[:, -1:] , lowerCamelCase__ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=lowerCamelCase__ , decoder_position_ids=lowerCamelCase__ , )
_UpperCAmelCase : Tuple = model.decode(lowerCamelCase__ , lowerCamelCase__ , decoder_attention_mask=lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" )
@require_flax
class lowerCAmelCase__ ( unittest.TestCase ):
lowerCAmelCase : Tuple = 99
def lowerCAmelCase__ ( self : Dict ) ->Any:
'''simple docstring'''
_UpperCAmelCase : int = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
] , dtype=np.intaa , )
_UpperCAmelCase : Optional[Any] = input_ids.shape[0]
_UpperCAmelCase : Optional[Any] = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def lowerCAmelCase__ ( self : Tuple ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = self._get_config_and_data()
_UpperCAmelCase : Dict = FlaxBlenderbotForConditionalGeneration(lowerCamelCase__ )
_UpperCAmelCase : List[str] = lm_model(input_ids=lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs["logits"].shape , lowerCamelCase__ )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->int:
'''simple docstring'''
_UpperCAmelCase : Any = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
_UpperCAmelCase : List[str] = FlaxBlenderbotForConditionalGeneration(lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa )
_UpperCAmelCase : Union[str, Any] = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa )
_UpperCAmelCase : Union[str, Any] = lm_model(input_ids=lowerCamelCase__ , decoder_input_ids=lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = (*summary.shape, config.vocab_size)
self.assertEqual(outputs["logits"].shape , lowerCamelCase__ )
def lowerCAmelCase__ ( self : Dict ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa )
_UpperCAmelCase : Any = shift_tokens_right(lowerCamelCase__ , 1 , 2 )
_UpperCAmelCase : Union[str, Any] = np.equal(lowerCamelCase__ , 1 ).astype(np.floataa ).sum()
_UpperCAmelCase : Optional[int] = np.equal(lowerCamelCase__ , 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(lowerCamelCase__ , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class lowerCAmelCase__ ( UpperCAmelCase__ , unittest.TestCase , UpperCAmelCase__ ):
lowerCAmelCase : int = True
lowerCAmelCase : Tuple = (
(
FlaxBlenderbotModel,
FlaxBlenderbotForConditionalGeneration,
)
if is_flax_available()
else ()
)
lowerCAmelCase : List[str] = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
def lowerCAmelCase__ ( self : Optional[Any] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : str = FlaxBlenderbotModelTester(self )
def lowerCAmelCase__ ( self : Any ) ->int:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def lowerCAmelCase__ ( self : str ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def lowerCAmelCase__ ( self : Optional[Any] ) ->int:
'''simple docstring'''
_UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_UpperCAmelCase : List[str] = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
_UpperCAmelCase : List[Any] = model_class(lowerCamelCase__ )
@jax.jit
def encode_jitted(lowerCamelCase__ : Dict , lowerCamelCase__ : str=None , **lowerCamelCase__ : List[str] ):
return model.encode(input_ids=lowerCamelCase__ , attention_mask=lowerCamelCase__ )
with self.subTest("JIT Enabled" ):
_UpperCAmelCase : Any = encode_jitted(**lowerCamelCase__ ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
_UpperCAmelCase : Dict = encode_jitted(**lowerCamelCase__ ).to_tuple()
self.assertEqual(len(lowerCamelCase__ ) , len(lowerCamelCase__ ) )
for jitted_output, output in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertEqual(jitted_output.shape , output.shape )
def lowerCAmelCase__ ( self : List[str] ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_UpperCAmelCase : Tuple = model_class(lowerCamelCase__ )
_UpperCAmelCase : Dict = model.encode(inputs_dict["input_ids"] , inputs_dict["attention_mask"] )
_UpperCAmelCase : str = {
"decoder_input_ids": inputs_dict["decoder_input_ids"],
"decoder_attention_mask": inputs_dict["decoder_attention_mask"],
"encoder_outputs": encoder_outputs,
}
@jax.jit
def decode_jitted(lowerCamelCase__ : Tuple , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : int ):
return model.decode(
decoder_input_ids=lowerCamelCase__ , decoder_attention_mask=lowerCamelCase__ , encoder_outputs=lowerCamelCase__ , )
with self.subTest("JIT Enabled" ):
_UpperCAmelCase : Any = decode_jitted(**lowerCamelCase__ ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
_UpperCAmelCase : int = decode_jitted(**lowerCamelCase__ ).to_tuple()
self.assertEqual(len(lowerCamelCase__ ) , len(lowerCamelCase__ ) )
for jitted_output, output in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def lowerCAmelCase__ ( self : List[str] ) ->Optional[Any]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
_UpperCAmelCase : Optional[Any] = model_class_name.from_pretrained("facebook/blenderbot-400M-distill" )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
_UpperCAmelCase : Optional[int] = np.ones((1, 1) ) * model.config.eos_token_id
_UpperCAmelCase : int = model(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
@unittest.skipUnless(jax_device != "cpu" , "3B test too slow on CPU." )
@slow
def lowerCAmelCase__ ( self : str ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : Any = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25}
_UpperCAmelCase : Any = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True}
_UpperCAmelCase : Optional[int] = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B" , from_pt=lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B" )
_UpperCAmelCase : Tuple = ["Sam"]
_UpperCAmelCase : Dict = tokenizer(lowerCamelCase__ , return_tensors="jax" )
_UpperCAmelCase : Tuple = model.generate(**lowerCamelCase__ , **lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = "Sam is a great name. It means \"sun\" in Gaelic."
_UpperCAmelCase : str = tokenizer.batch_decode(lowerCamelCase__ , **lowerCamelCase__ )
assert generated_txt[0].strip() == tgt_text
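# The cache-equivalence tests above reduce to one numeric pattern: compare the
# last-position logits of a cached decode against a full uncached pass and require
# the max absolute difference to stay under 1e-3. A toy numpy version of that check:
import numpy as np

full_pass = np.random.rand(2, 7, 16)                       # stand-in for uncached logits
cached_pass = full_pass + 1e-5 * np.random.rand(2, 7, 16)  # stand-in for cached logits
diff = np.max(np.abs(cached_pass[:, -1, :5] - full_pass[:, -1, :5]))
assert diff < 1e-3, f"Max diff is {diff}"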
| 709
|
'''simple docstring'''
from __future__ import annotations
lowerCamelCase__ = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
class lowerCAmelCase__ :
def __init__( self : int , lowerCamelCase__ : dict[str, list[str]] , lowerCamelCase__ : str ) ->None:
'''simple docstring'''
_UpperCAmelCase : Dict = graph
# mapping node to its parent in resulting breadth first tree
_UpperCAmelCase : dict[str, str | None] = {}
_UpperCAmelCase : List[Any] = source_vertex
def lowerCAmelCase__ ( self : Optional[int] ) ->None:
'''simple docstring'''
_UpperCAmelCase : List[Any] = {self.source_vertex}
_UpperCAmelCase : List[Any] = None
_UpperCAmelCase : List[str] = [self.source_vertex] # first in first out queue
while queue:
_UpperCAmelCase : int = queue.pop(0 )
for adjacent_vertex in self.graph[vertex]:
if adjacent_vertex not in visited:
visited.add(lowerCamelCase__ )
_UpperCAmelCase : List[Any] = vertex
queue.append(lowerCamelCase__ )
def lowerCAmelCase__ ( self : Tuple , lowerCamelCase__ : str ) ->str:
'''simple docstring'''
if target_vertex == self.source_vertex:
return self.source_vertex
_UpperCAmelCase : int = self.parent.get(lowerCamelCase__ )
if target_vertex_parent is None:
_UpperCAmelCase : Tuple = (
F"""No path from vertex: {self.source_vertex} to vertex: {target_vertex}"""
)
raise ValueError(lowerCamelCase__ )
return self.shortest_path(lowerCamelCase__ ) + F"""->{target_vertex}"""
if __name__ == "__main__":
lowerCamelCase__ = Graph(graph, 'G')
g.breath_first_search()
print(g.shortest_path('D'))
print(g.shortest_path('G'))
print(g.shortest_path('Foo'))
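# An alternative, loop-based path reconstruction over the same parent map the class
# builds (the recursive version above can hit the recursion limit on very long paths).
# This sketch assumes a dict mapping each vertex to its BFS-tree parent.
def reconstruct_path(parent: dict, source: str, target: str) -> str:
    path = [target]
    while path[-1] != source:
        step = parent.get(path[-1])
        if step is None:
            raise ValueError(f"No path from vertex: {source} to vertex: {target}")
        path.append(step)
    return "->".join(reversed(path))

print(reconstruct_path({"B": "A", "D": "B"}, "A", "D"))  # A->B->D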
| 40
| 0
|
'''simple docstring'''
from __future__ import annotations
def __lowerCAmelCase (__lowerCAmelCase ):
    return [ord(elem) - 96 for elem in plain]
def __lowerCAmelCase (__lowerCAmelCase ):
return "".join(chr(elem + 96 ) for elem in encoded )
def __lowerCAmelCase ():
_UpperCAmelCase : int = encode(input("-> " ).strip().lower() )
print("Encoded: " , __lowerCAmelCase )
print("Decoded:" , decode(__lowerCAmelCase ) )
if __name__ == "__main__":
main()
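# Quick round-trip check of the a1z26-style mapping implemented above
# ('a' -> 1 ... 'z' -> 26, via the offset ord('a') - 1 == 96):
demo_encoded = [ord(c) - 96 for c in "hello"]
print(demo_encoded)                                # [8, 5, 12, 12, 15]
print("".join(chr(n + 96) for n in demo_encoded))  # hello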
| 710
|
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowerCAmelCase__ ( UpperCAmelCase__ ):
lowerCAmelCase : Any = ["image_processor", "tokenizer"]
lowerCAmelCase : List[Any] = "BlipImageProcessor"
lowerCAmelCase : Union[str, Any] = "AutoTokenizer"
def __init__( self : Any , lowerCamelCase__ : Tuple , lowerCamelCase__ : int ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = False
super().__init__(lowerCamelCase__ , lowerCamelCase__ )
_UpperCAmelCase : Tuple = self.image_processor
def __call__( self : Dict , lowerCamelCase__ : ImageInput = None , lowerCamelCase__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , lowerCamelCase__ : bool = True , lowerCamelCase__ : Union[bool, str, PaddingStrategy] = False , lowerCamelCase__ : Union[bool, str, TruncationStrategy] = None , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : int = 0 , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : Optional[bool] = None , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = True , lowerCamelCase__ : Optional[Union[str, TensorType]] = None , **lowerCamelCase__ : Tuple , ) ->BatchEncoding:
'''simple docstring'''
if images is None and text is None:
raise ValueError("You have to specify either images or text." )
# Get only text
if images is None:
_UpperCAmelCase : Optional[int] = self.tokenizer
_UpperCAmelCase : List[Any] = self.tokenizer(
text=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , max_length=lowerCamelCase__ , stride=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , return_overflowing_tokens=lowerCamelCase__ , return_special_tokens_mask=lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , return_token_type_ids=lowerCamelCase__ , return_length=lowerCamelCase__ , verbose=lowerCamelCase__ , return_tensors=lowerCamelCase__ , **lowerCamelCase__ , )
return text_encoding
# add pixel_values
_UpperCAmelCase : Optional[int] = self.image_processor(lowerCamelCase__ , return_tensors=lowerCamelCase__ )
if text is not None:
_UpperCAmelCase : Dict = self.tokenizer(
text=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , max_length=lowerCamelCase__ , stride=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , return_overflowing_tokens=lowerCamelCase__ , return_special_tokens_mask=lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , return_token_type_ids=lowerCamelCase__ , return_length=lowerCamelCase__ , verbose=lowerCamelCase__ , return_tensors=lowerCamelCase__ , **lowerCamelCase__ , )
else:
_UpperCAmelCase : int = None
if text_encoding is not None:
encoding_image_processor.update(lowerCamelCase__ )
return encoding_image_processor
def lowerCAmelCase__ ( self : List[Any] , *lowerCamelCase__ : Optional[int] , **lowerCamelCase__ : Dict ) ->Optional[Any]:
'''simple docstring'''
return self.tokenizer.batch_decode(*lowerCamelCase__ , **lowerCamelCase__ )
def lowerCAmelCase__ ( self : int , *lowerCamelCase__ : Dict , **lowerCamelCase__ : str ) ->Optional[int]:
'''simple docstring'''
return self.tokenizer.decode(*lowerCamelCase__ , **lowerCamelCase__ )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def lowerCAmelCase__ ( self : Any ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = self.tokenizer.model_input_names
_UpperCAmelCase : Dict = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
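# Hypothetical usage sketch for a processor of this shape (the concrete class and
# checkpoint name are assumptions, and running it needs transformers plus a network
# download, so it is left commented out):
# from PIL import Image
# from transformers import Blip2Processor
# processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
# inputs = processor(images=Image.open("cat.png"), text="a photo of", return_tensors="pt")
# print(inputs.keys())  # pixel_values plus the tokenizer outputs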
| 40
| 0
|
'''simple docstring'''
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
lowerCamelCase__ = False
class lowerCAmelCase__ ( unittest.TestCase ):
def lowerCAmelCase__ ( self : Tuple , lowerCamelCase__ : Optional[int]=32 ) ->str:
'''simple docstring'''
set_seed(0 )
_UpperCAmelCase : int = UNetaDModel(sample_size=lowerCamelCase__ , in_channels=3 , out_channels=3 )
_UpperCAmelCase : Union[str, Any] = torch.optim.SGD(model.parameters() , lr=0.0_0_0_1 )
return model, optimizer
@slow
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = "cpu" # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
_UpperCAmelCase : str = DDPMScheduler(
num_train_timesteps=10_00 , beta_start=0.0_0_0_1 , beta_end=0.0_2 , beta_schedule="linear" , clip_sample=lowerCamelCase__ , )
_UpperCAmelCase : List[str] = DDIMScheduler(
num_train_timesteps=10_00 , beta_start=0.0_0_0_1 , beta_end=0.0_2 , beta_schedule="linear" , clip_sample=lowerCamelCase__ , )
assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
# shared batches for DDPM and DDIM
set_seed(0 )
_UpperCAmelCase : int = [torch.randn((4, 3, 32, 32) ).clip(-1 , 1 ).to(lowerCamelCase__ ) for _ in range(4 )]
_UpperCAmelCase : int = [torch.randn((4, 3, 32, 32) ).to(lowerCamelCase__ ) for _ in range(4 )]
_UpperCAmelCase : Any = [torch.randint(0 , 10_00 , (4,) ).long().to(lowerCamelCase__ ) for _ in range(4 )]
# train with a DDPM scheduler
_UpperCAmelCase : Any = self.get_model_optimizer(resolution=32 )
model.train().to(lowerCamelCase__ )
for i in range(4 ):
optimizer.zero_grad()
_UpperCAmelCase : Union[str, Any] = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
_UpperCAmelCase : Union[str, Any] = model(lowerCamelCase__ , timesteps[i] ).sample
_UpperCAmelCase : int = torch.nn.functional.mse_loss(lowerCamelCase__ , noise[i] )
loss.backward()
optimizer.step()
del model, optimizer
# recreate the model and optimizer, and retry with DDIM
_UpperCAmelCase : Any = self.get_model_optimizer(resolution=32 )
model.train().to(lowerCamelCase__ )
for i in range(4 ):
optimizer.zero_grad()
_UpperCAmelCase : Union[str, Any] = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
_UpperCAmelCase : Optional[Any] = model(lowerCamelCase__ , timesteps[i] ).sample
_UpperCAmelCase : Union[str, Any] = torch.nn.functional.mse_loss(lowerCamelCase__ , noise[i] )
loss.backward()
optimizer.step()
del model, optimizer
self.assertTrue(torch.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1E-5 ) )
self.assertTrue(torch.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1E-5 ) )
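# Both schedulers share the same forward-noising rule at matched timesteps:
# x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps. A plain-torch sketch
# with the same linear beta schedule as the test (values are illustrative only):
import torch

betas = torch.linspace(0.0001, 0.02, 1000)
alpha_bars = torch.cumprod(1.0 - betas, dim=0)
x0 = torch.randn(4, 3, 32, 32)
eps = torch.randn_like(x0)
t = 500
x_t = alpha_bars[t].sqrt() * x0 + (1.0 - alpha_bars[t]).sqrt() * eps
print(x_t.shape)  # torch.Size([4, 3, 32, 32])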
| 711
|
'''simple docstring'''
def __lowerCAmelCase (__lowerCAmelCase ): # noqa: E741
_UpperCAmelCase : List[str] = len(__lowerCAmelCase )
_UpperCAmelCase : str = 0
_UpperCAmelCase : List[str] = [0] * n
_UpperCAmelCase : int = [False] * n
_UpperCAmelCase : Dict = [False] * n
def dfs(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
if parent == root:
out_edge_count += 1
_UpperCAmelCase : List[Any] = True
_UpperCAmelCase : str = at
for to in l[at]:
if to == parent:
pass
elif not visited[to]:
_UpperCAmelCase : List[str] = dfs(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase : Tuple = min(low[at] , low[to] )
# AP found via bridge
if at < low[to]:
_UpperCAmelCase : Dict = True
# AP found via cycle
if at == low[to]:
_UpperCAmelCase : Dict = True
else:
_UpperCAmelCase : Optional[int] = min(low[at] , __lowerCAmelCase )
return out_edge_count
for i in range(__lowerCAmelCase ):
if not visited[i]:
_UpperCAmelCase : str = 0
_UpperCAmelCase : Tuple = dfs(__lowerCAmelCase , __lowerCAmelCase , -1 , __lowerCAmelCase )
_UpperCAmelCase : Optional[int] = out_edge_count > 1
for x in range(len(__lowerCAmelCase ) ):
if is_art[x] is True:
print(__lowerCAmelCase )
# Adjacency list of graph
lowerCamelCase__ = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
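# Brute-force cross-check for the graph above: a vertex is an articulation point
# exactly when removing it increases the number of connected components. For this
# adjacency list the expected articulation points are 2, 3 and 5.
def count_components(graph, removed=None):
    seen, comps = set(), 0
    for start in graph:
        if start == removed or start in seen:
            continue
        comps += 1
        stack = [start]
        seen.add(start)
        while stack:
            node = stack.pop()
            for nxt in graph[node]:
                if nxt != removed and nxt not in seen:
                    seen.add(nxt)
                    stack.append(nxt)
    return comps

demo_graph = {0: [1, 2], 1: [0, 2], 2: [0, 1, 3, 5], 3: [2, 4], 4: [3],
              5: [2, 6, 8], 6: [5, 7], 7: [6, 8], 8: [5, 7]}
base = count_components(demo_graph)
print([v for v in demo_graph if count_components(demo_graph, removed=v) > base])  # [2, 3, 5]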
| 40
| 0
|
'''simple docstring'''
from itertools import product
from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uinta, zeros
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase : Any = k_size // 2
_UpperCAmelCase : Dict = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
_UpperCAmelCase : List[str] = 1 / (2 * pi * sigma) * exp(-(square(__lowerCAmelCase ) + square(__lowerCAmelCase )) / (2 * square(__lowerCAmelCase )) )
return g
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase : Dict = image.shape[0], image.shape[1]
# dst image height and width
_UpperCAmelCase : Tuple = height - k_size + 1
_UpperCAmelCase : Optional[int] = width - k_size + 1
# im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
_UpperCAmelCase : Union[str, Any] = zeros((dst_height * dst_width, k_size * k_size) )
_UpperCAmelCase : List[Any] = 0
for i, j in product(range(__lowerCAmelCase ) , range(__lowerCAmelCase ) ):
_UpperCAmelCase : Union[str, Any] = ravel(image[i : i + k_size, j : j + k_size] )
_UpperCAmelCase : List[Any] = window
row += 1
# turn the kernel into shape(k*k, 1)
_UpperCAmelCase : Tuple = gen_gaussian_kernel(__lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase : Optional[int] = ravel(__lowerCAmelCase )
# reshape and get the dst image
_UpperCAmelCase : List[str] = dot(__lowerCAmelCase , __lowerCAmelCase ).reshape(__lowerCAmelCase , __lowerCAmelCase ).astype(__lowerCAmelCase )
return dst
if __name__ == "__main__":
# read original image
lowerCamelCase__ = imread(r'../image_data/lena.jpg')
# turn image in gray scale value
lowerCamelCase__ = cvtColor(img, COLOR_BGR2GRAY)
# get values with two different mask size
lowerCamelCase__ = gaussian_filter(gray, 3, sigma=1)
lowerCamelCase__ = gaussian_filter(gray, 5, sigma=0.8)
# show result images
imshow('gaussian filter with 3x3 mask', gaussianaxa)
imshow('gaussian filter with 5x5 mask', gaussianaxa)
waitKey()
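# Side note, as a hedged sketch: the kernel generated above is not re-normalized,
# so filtering slightly shifts overall brightness. Dividing by the kernel sum makes
# the weights sum to exactly 1 (the helper name here is ours, not the script's):
import numpy as np

def normalized_gaussian_kernel(k_size, sigma):
    center = k_size // 2
    x, y = np.mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * np.pi * sigma) * np.exp(-(np.square(x) + np.square(y)) / (2 * np.square(sigma)))
    return g / g.sum()

print(normalized_gaussian_kernel(3, 1).sum())  # 1.0 (up to float rounding)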
| 712
|
'''simple docstring'''
def __lowerCAmelCase ():
_UpperCAmelCase : str = 0
for i in range(1 , 1_001 ):
total += i**i
return str(__lowerCAmelCase )[-10:]
if __name__ == "__main__":
print(solution())
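# An equivalent constant-memory variant of the solution above (Project Euler 48):
# keep only the last ten digits at every step with three-argument pow. For this
# series the result has no leading zero, so it matches the string-slice answer.
MOD = 10**10
print(sum(pow(i, i, MOD) for i in range(1, 1001)) % MOD)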
| 40
| 0
|
'''simple docstring'''
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
lowerCamelCase__ = 'src/diffusers'
lowerCamelCase__ = '.'
# This is to make sure the diffusers module imported is the one in the repo.
lowerCamelCase__ = importlib.util.spec_from_file_location(
'diffusers',
os.path.join(DIFFUSERS_PATH, '__init__.py'),
submodule_search_locations=[DIFFUSERS_PATH],
)
lowerCamelCase__ = spec.loader.load_module()
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
return line.startswith(__lowerCAmelCase ) or len(__lowerCAmelCase ) <= 1 or re.search(R"^\s*\)(\s*->.*:|:)\s*$" , __lowerCAmelCase ) is not None
def __lowerCAmelCase (__lowerCAmelCase ):
_UpperCAmelCase : int = object_name.split("." )
_UpperCAmelCase : Dict = 0
# First let's find the module where our object lives.
_UpperCAmelCase : List[str] = parts[i]
while i < len(__lowerCAmelCase ) and not os.path.isfile(os.path.join(__lowerCAmelCase , F"""{module}.py""" ) ):
i += 1
if i < len(__lowerCAmelCase ):
_UpperCAmelCase : Tuple = os.path.join(__lowerCAmelCase , parts[i] )
if i >= len(__lowerCAmelCase ):
raise ValueError(F"""`object_name` should begin with the name of a module of diffusers but got {object_name}.""" )
with open(os.path.join(__lowerCAmelCase , F"""{module}.py""" ) , "r" , encoding="utf-8" , newline="\n" ) as f:
_UpperCAmelCase : Union[str, Any] = f.readlines()
# Now let's find the class / func in the code!
_UpperCAmelCase : Dict = ""
_UpperCAmelCase : Union[str, Any] = 0
for name in parts[i + 1 :]:
while (
line_index < len(__lowerCAmelCase ) and re.search(RF"""^{indent}(class|def)\s+{name}(\(|\:)""" , lines[line_index] ) is None
):
line_index += 1
indent += " "
line_index += 1
if line_index >= len(__lowerCAmelCase ):
raise ValueError(F""" {object_name} does not match any function or class in {module}.""" )
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
_UpperCAmelCase : Optional[Any] = line_index
while line_index < len(__lowerCAmelCase ) and _should_continue(lines[line_index] , __lowerCAmelCase ):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
_UpperCAmelCase : Optional[Any] = lines[start_index:line_index]
return "".join(__lowerCAmelCase )
lowerCamelCase__ = re.compile(r'^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)')
lowerCamelCase__ = re.compile(r'^\s*(\S+)->(\S+)(\s+.*|$)')
lowerCamelCase__ = re.compile(r'<FILL\s+[^>]*>')
def __lowerCAmelCase (__lowerCAmelCase ):
_UpperCAmelCase : str = code.split("\n" )
_UpperCAmelCase : Union[str, Any] = 0
while idx < len(__lowerCAmelCase ) and len(lines[idx] ) == 0:
idx += 1
if idx < len(__lowerCAmelCase ):
return re.search(R"^(\s*)\S" , lines[idx] ).groups()[0]
return ""
def __lowerCAmelCase (__lowerCAmelCase ):
_UpperCAmelCase : Optional[Any] = len(get_indent(__lowerCAmelCase ) ) > 0
if has_indent:
_UpperCAmelCase : int = F"""class Bla:\n{code}"""
_UpperCAmelCase : Optional[Any] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 , preview=__lowerCAmelCase )
_UpperCAmelCase : List[Any] = black.format_str(__lowerCAmelCase , mode=__lowerCAmelCase )
_UpperCAmelCase : List[Any] = style_docstrings_in_code(__lowerCAmelCase )
return result[len("class Bla:\n" ) :] if has_indent else result
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase=False ):
with open(__lowerCAmelCase , "r" , encoding="utf-8" , newline="\n" ) as f:
_UpperCAmelCase : int = f.readlines()
_UpperCAmelCase : List[Any] = []
_UpperCAmelCase : Union[str, Any] = 0
# Not a for loop cause `lines` is going to change (if `overwrite=True`).
while line_index < len(__lowerCAmelCase ):
_UpperCAmelCase : Optional[int] = _re_copy_warning.search(lines[line_index] )
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
_UpperCAmelCase : str = search.groups()
_UpperCAmelCase : Union[str, Any] = find_code_in_diffusers(__lowerCAmelCase )
_UpperCAmelCase : Union[str, Any] = get_indent(__lowerCAmelCase )
_UpperCAmelCase : str = line_index + 1 if indent == theoretical_indent else line_index + 2
_UpperCAmelCase : Dict = theoretical_indent
_UpperCAmelCase : Tuple = start_index
# Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
_UpperCAmelCase : Union[str, Any] = True
while line_index < len(__lowerCAmelCase ) and should_continue:
line_index += 1
if line_index >= len(__lowerCAmelCase ):
break
_UpperCAmelCase : Optional[Any] = lines[line_index]
_UpperCAmelCase : Tuple = _should_continue(__lowerCAmelCase , __lowerCAmelCase ) and re.search(F"""^{indent}# End copy""" , __lowerCAmelCase ) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
_UpperCAmelCase : int = lines[start_index:line_index]
_UpperCAmelCase : Tuple = "".join(__lowerCAmelCase )
# Remove any nested `Copied from` comments to avoid circular copies
_UpperCAmelCase : List[str] = [line for line in theoretical_code.split("\n" ) if _re_copy_warning.search(__lowerCAmelCase ) is None]
_UpperCAmelCase : Union[str, Any] = "\n".join(__lowerCAmelCase )
# Before comparing, use the `replace_pattern` on the original code.
if len(__lowerCAmelCase ) > 0:
_UpperCAmelCase : Tuple = replace_pattern.replace("with" , "" ).split("," )
_UpperCAmelCase : Union[str, Any] = [_re_replace_pattern.search(__lowerCAmelCase ) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
_UpperCAmelCase : int = pattern.groups()
_UpperCAmelCase : Optional[int] = re.sub(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
if option.strip() == "all-casing":
_UpperCAmelCase : str = re.sub(obja.lower() , obja.lower() , __lowerCAmelCase )
_UpperCAmelCase : Optional[Any] = re.sub(obja.upper() , obja.upper() , __lowerCAmelCase )
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
_UpperCAmelCase : List[Any] = blackify(lines[start_index - 1] + theoretical_code )
_UpperCAmelCase : Any = theoretical_code[len(lines[start_index - 1] ) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index] )
if overwrite:
_UpperCAmelCase : Dict = lines[:start_index] + [theoretical_code] + lines[line_index:]
_UpperCAmelCase : List[str] = start_index + 1
if overwrite and len(__lowerCAmelCase ) > 0:
# Warn the user a file has been modified.
print(F"""Detected changes, rewriting {filename}.""" )
with open(__lowerCAmelCase , "w" , encoding="utf-8" , newline="\n" ) as f:
f.writelines(__lowerCAmelCase )
return diffs
def __lowerCAmelCase (__lowerCAmelCase = False ):
_UpperCAmelCase : str = glob.glob(os.path.join(__lowerCAmelCase , "**/*.py" ) , recursive=__lowerCAmelCase )
_UpperCAmelCase : Optional[Any] = []
for filename in all_files:
_UpperCAmelCase : Optional[int] = is_copy_consistent(__lowerCAmelCase , __lowerCAmelCase )
diffs += [F"""- {filename}: copy does not match {d[0]} at line {d[1]}""" for d in new_diffs]
if not overwrite and len(__lowerCAmelCase ) > 0:
_UpperCAmelCase : int = "\n".join(__lowerCAmelCase )
raise Exception(
"Found the following copy inconsistencies:\n"
+ diff
+ "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them." )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
lowerCamelCase__ = parser.parse_args()
check_copies(args.fix_and_overwrite)
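# Illustration of the comment format the regexes above parse; the module path and
# replacement pattern in this demo line are made up.
import re

_re_copy_demo = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
demo_line = "    # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test"
indent, target, replace_pattern = _re_copy_demo.search(demo_line).groups()
print(target)           # schedulers.scheduling_ddpm.DDPMSchedulerOutput
print(replace_pattern)  # with DDPM->Test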
| 713
|
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(__lowerCAmelCase , __lowerCAmelCase ) ) )
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
if dataset.ndim != value_array.ndim:
_UpperCAmelCase : Optional[Any] = (
"Wrong input data's dimensions... "
F"""dataset : {dataset.ndim}, value_array : {value_array.ndim}"""
)
raise ValueError(__lowerCAmelCase )
try:
if dataset.shape[1] != value_array.shape[1]:
_UpperCAmelCase : Optional[int] = (
"Wrong input data's shape... "
F"""dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"""
)
raise ValueError(__lowerCAmelCase )
except IndexError:
if dataset.ndim != value_array.ndim:
raise TypeError("Wrong shape" )
if dataset.dtype != value_array.dtype:
_UpperCAmelCase : Union[str, Any] = (
"Input data have different datatype... "
F"""dataset : {dataset.dtype}, value_array : {value_array.dtype}"""
)
raise TypeError(__lowerCAmelCase )
_UpperCAmelCase : Optional[int] = []
for value in value_array:
_UpperCAmelCase : List[str] = euclidean(__lowerCAmelCase , dataset[0] )
_UpperCAmelCase : Dict = dataset[0].tolist()
for dataset_value in dataset[1:]:
_UpperCAmelCase : int = euclidean(__lowerCAmelCase , __lowerCAmelCase )
if dist > temp_dist:
_UpperCAmelCase : Tuple = temp_dist
_UpperCAmelCase : Dict = dataset_value.tolist()
answer.append([vector, dist] )
return answer
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
return np.dot(__lowerCAmelCase , __lowerCAmelCase ) / (norm(__lowerCAmelCase ) * norm(__lowerCAmelCase ))
if __name__ == "__main__":
import doctest
doctest.testmod()
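# A plain-numpy restatement of the nearest-neighbour search implemented above;
# the dataset and query values here are made up for the demo.
import numpy as np

demo_dataset = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
demo_query = np.array([0.9, 1.2])
dists = np.linalg.norm(demo_dataset - demo_query, axis=1)
print(demo_dataset[dists.argmin()].tolist(), float(dists.min()))  # [1.0, 1.0] and ~0.2236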
| 40
| 0
|
'''simple docstring'''
import math
import random
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase = False ):
if deriv:
return value * (1 - value)
return 1 / (1 + math.exp(-value ))
# Initial Value
lowerCamelCase__ = 0.02
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase : Union[str, Any] = float(2 * (random.randint(1 , 100 )) - 1 )
for _ in range(__lowerCAmelCase ):
# Forward propagation
_UpperCAmelCase : int = sigmoid_function(INITIAL_VALUE * weight )
# How much did we miss?
_UpperCAmelCase : int = (expected / 100) - layer_a
# Error delta
        _UpperCAmelCase : Union[str, Any] = layer_1_error * sigmoid_function(layer_a , True )
# Update weight
weight += INITIAL_VALUE * layer_1_delta
return layer_a * 100
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase__ = int(input('Expected value: '))
lowerCamelCase__ = int(input('Number of propagations: '))
print(forward_propagation(expected, number_propagations))
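# The derivative branch of the sigmoid above uses the identity s'(x) = s(x) * (1 - s(x)).
# A quick finite-difference check of that identity:
import math

def demo_sigmoid(x):
    return 1 / (1 + math.exp(-x))

x, h = 0.7, 1e-6
analytic = demo_sigmoid(x) * (1 - demo_sigmoid(x))
numeric = (demo_sigmoid(x + h) - demo_sigmoid(x - h)) / (2 * h)
print(abs(analytic - numeric) < 1e-8)  # True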
| 714
|
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
lowerCamelCase__ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
lowerCamelCase__ = ' \"""\n Output class for the scheduler\'s step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"""\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n'
class lowerCAmelCase__ ( unittest.TestCase ):
def lowerCAmelCase__ ( self : Any ) ->Any:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , "schedulers/" ) )
_UpperCAmelCase : Optional[Any] = self.diffusers_dir
shutil.copy(
os.path.join(lowerCamelCase__ , "src/diffusers/schedulers/scheduling_ddpm.py" ) , os.path.join(self.diffusers_dir , "schedulers/scheduling_ddpm.py" ) , )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->str:
'''simple docstring'''
_UpperCAmelCase : int = "src/diffusers"
shutil.rmtree(self.diffusers_dir )
def lowerCAmelCase__ ( self : str , lowerCamelCase__ : List[str] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Any=None ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = comment + F"""\nclass {class_name}(nn.Module):\n""" + class_code
if overwrite_result is not None:
_UpperCAmelCase : Tuple = comment + F"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
_UpperCAmelCase : Optional[Any] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_19 )
_UpperCAmelCase : Tuple = black.format_str(lowerCamelCase__ , mode=lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = os.path.join(self.diffusers_dir , "new_code.py" )
with open(lowerCamelCase__ , "w" , newline="\n" ) as f:
f.write(lowerCamelCase__ )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(lowerCamelCase__ ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=lowerCamelCase__ )
with open(lowerCamelCase__ , "r" ) as f:
self.assertTrue(f.read() , lowerCamelCase__ )
def lowerCAmelCase__ ( self : str ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase : Tuple = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput" )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Optional[int]:
'''simple docstring'''
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , REFERENCE_CODE + "\n" , )
# With no empty line at the end
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , lowerCamelCase__ , )
# Copy consistency with rename
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , re.sub("DDPM" , "Test" , lowerCamelCase__ ) , )
# Copy consistency with a really long name
_UpperCAmelCase : int = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
self.check_copy_consistency(
F"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , F"""{long_class_name}SchedulerOutput""" , re.sub("Bert" , lowerCamelCase__ , lowerCamelCase__ ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , lowerCamelCase__ , overwrite_result=re.sub("DDPM" , "Test" , lowerCamelCase__ ) , )
| 40
| 0
|
'''simple docstring'''
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class lowerCAmelCase__ ( unittest.TestCase ):
def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : int , lowerCamelCase__ : int ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = jnp.ones((batch_size, length) ) / length
return scores
def lowerCAmelCase__ ( self : Optional[int] ) ->Any:
'''simple docstring'''
_UpperCAmelCase : List[Any] = None
_UpperCAmelCase : Dict = 20
_UpperCAmelCase : List[str] = self._get_uniform_logits(batch_size=2 , length=lowerCamelCase__ )
# tweak scores to not be uniform anymore
_UpperCAmelCase : Optional[int] = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
_UpperCAmelCase : Optional[Any] = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
_UpperCAmelCase : List[Any] = jax.nn.softmax(lowerCamelCase__ , axis=-1 )
_UpperCAmelCase : int = FlaxTemperatureLogitsWarper(temperature=0.5 )
_UpperCAmelCase : List[Any] = FlaxTemperatureLogitsWarper(temperature=1.3 )
_UpperCAmelCase : List[Any] = jax.nn.softmax(temp_dist_warper_sharper(lowerCamelCase__ , scores.copy() , cur_len=lowerCamelCase__ ) , axis=-1 )
_UpperCAmelCase : Any = jax.nn.softmax(temp_dist_warper_smoother(lowerCamelCase__ , scores.copy() , cur_len=lowerCamelCase__ ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1E-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1E-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def lowerCAmelCase__ ( self : Optional[int] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Any = None
_UpperCAmelCase : int = 10
_UpperCAmelCase : Tuple = 2
# create ramp distribution
_UpperCAmelCase : Optional[int] = np.broadcast_to(np.arange(lowerCamelCase__ )[None, :] , (batch_size, vocab_size) ).copy()
_UpperCAmelCase : Tuple = ramp_logits[1:, : vocab_size // 2] + vocab_size
_UpperCAmelCase : int = FlaxTopKLogitsWarper(3 )
_UpperCAmelCase : List[str] = top_k_warp(lowerCamelCase__ , lowerCamelCase__ , cur_len=lowerCamelCase__ )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
_UpperCAmelCase : str = 5
_UpperCAmelCase : Any = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
_UpperCAmelCase : int = np.broadcast_to(np.arange(lowerCamelCase__ )[None, :] , (batch_size, length) ).copy()
_UpperCAmelCase : List[Any] = top_k_warp_safety_check(lowerCamelCase__ , lowerCamelCase__ , cur_len=lowerCamelCase__ )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def lowerCAmelCase__ ( self : Dict ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = None
_UpperCAmelCase : Optional[int] = 10
_UpperCAmelCase : int = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
_UpperCAmelCase : Tuple = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.1_5, 0.3, 0.3, 0.2_5]] ) )
_UpperCAmelCase : Tuple = FlaxTopPLogitsWarper(0.8 )
_UpperCAmelCase : List[Any] = np.exp(top_p_warp(lowerCamelCase__ , lowerCamelCase__ , cur_len=lowerCamelCase__ ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
_UpperCAmelCase : Tuple = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.2_5]] )
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1E-3 ) )
# check edge cases with negative and extreme logits
_UpperCAmelCase : Optional[Any] = np.broadcast_to(np.arange(lowerCamelCase__ )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
_UpperCAmelCase : str = ramp_logits[1] * 1_00.0
# make sure at least 2 tokens are kept
_UpperCAmelCase : int = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
_UpperCAmelCase : Tuple = top_p_warp(lowerCamelCase__ , lowerCamelCase__ , cur_len=lowerCamelCase__ )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def lowerCAmelCase__ ( self : Dict ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = 20
_UpperCAmelCase : str = 4
_UpperCAmelCase : Any = 0
_UpperCAmelCase : Optional[Any] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=lowerCamelCase__ )
# check that min length is applied at length 5
_UpperCAmelCase : Union[str, Any] = ids_tensor((batch_size, 20) , vocab_size=20 )
_UpperCAmelCase : int = 5
_UpperCAmelCase : List[Any] = self._get_uniform_logits(lowerCamelCase__ , lowerCamelCase__ )
_UpperCAmelCase : Any = min_dist_processor(lowerCamelCase__ , lowerCamelCase__ , cur_len=lowerCamelCase__ )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float("inf" )] )
# check that min length is not applied anymore at length 15
_UpperCAmelCase : Dict = self._get_uniform_logits(lowerCamelCase__ , lowerCamelCase__ )
_UpperCAmelCase : Dict = 15
_UpperCAmelCase : str = min_dist_processor(lowerCamelCase__ , lowerCamelCase__ , cur_len=lowerCamelCase__ )
self.assertFalse(jnp.isinf(lowerCamelCase__ ).any() )
def lowerCAmelCase__ ( self : List[str] ) ->Any:
'''simple docstring'''
_UpperCAmelCase : int = 20
_UpperCAmelCase : int = 4
_UpperCAmelCase : List[str] = 0
_UpperCAmelCase : Dict = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowerCamelCase__ )
# check that all scores are -inf except the bos_token_id score
_UpperCAmelCase : int = ids_tensor((batch_size, 1) , vocab_size=20 )
_UpperCAmelCase : Any = 1
_UpperCAmelCase : str = self._get_uniform_logits(lowerCamelCase__ , lowerCamelCase__ )
_UpperCAmelCase : int = logits_processor(lowerCamelCase__ , lowerCamelCase__ , cur_len=lowerCamelCase__ )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
        self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id should be zero
# check that bos_token_id is not forced if current length is greater than 1
_UpperCAmelCase : int = 3
_UpperCAmelCase : str = self._get_uniform_logits(lowerCamelCase__ , lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = logits_processor(lowerCamelCase__ , lowerCamelCase__ , cur_len=lowerCamelCase__ )
self.assertFalse(jnp.isinf(lowerCamelCase__ ).any() )
def lowerCAmelCase__ ( self : List[Any] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Any = 20
_UpperCAmelCase : Union[str, Any] = 4
_UpperCAmelCase : List[Any] = 0
_UpperCAmelCase : str = 5
_UpperCAmelCase : Tuple = FlaxForcedEOSTokenLogitsProcessor(max_length=lowerCamelCase__ , eos_token_id=lowerCamelCase__ )
# check that all scores are -inf except the eos_token_id when max_length is reached
_UpperCAmelCase : Tuple = ids_tensor((batch_size, 4) , vocab_size=20 )
_UpperCAmelCase : List[str] = 4
_UpperCAmelCase : Any = self._get_uniform_logits(lowerCamelCase__ , lowerCamelCase__ )
_UpperCAmelCase : int = logits_processor(lowerCamelCase__ , lowerCamelCase__ , cur_len=lowerCamelCase__ )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
_UpperCAmelCase : List[Any] = 3
_UpperCAmelCase : Dict = self._get_uniform_logits(lowerCamelCase__ , lowerCamelCase__ )
_UpperCAmelCase : List[str] = logits_processor(lowerCamelCase__ , lowerCamelCase__ , cur_len=lowerCamelCase__ )
self.assertFalse(jnp.isinf(lowerCamelCase__ ).any() )
    def test_processor_list(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
        scores = top_k_warp(input_ids, scores, cur_len=cur_len)
        scores = top_p_warp(input_ids, scores, cur_len=cur_len)
        scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)

        # with processor list
        processor = FlaxLogitsProcessorList(
            [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
        )
        scores_comp = processor(input_ids, scores_comp, cur_len=cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
    def test_processor_list_jitted(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        def run_no_processor_list(input_ids, scores, cur_len):
            scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
            scores = top_k_warp(input_ids, scores, cur_len=cur_len)
            scores = top_p_warp(input_ids, scores, cur_len=cur_len)
            scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)
            return scores

        # with processor list
        def run_processor_list(input_ids, scores, cur_len):
            processor = FlaxLogitsProcessorList(
                [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
            )
            scores = processor(input_ids, scores, cur_len=cur_len)
            return scores

        jitted_run_no_processor_list = jax.jit(run_no_processor_list)
        jitted_run_processor_list = jax.jit(run_processor_list)

        scores = jitted_run_no_processor_list(input_ids, scores, cur_len)
        scores_comp = jitted_run_processor_list(input_ids, scores_comp, cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
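
        # Design note: running the processor chain both eagerly and under
        # jax.jit and comparing the results guards against tracing-specific
        # behavior in the processors (e.g. Python-side branching on traced values).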
| 715
|
'''simple docstring'''
from math import factorial
class Dual:
    def __init__(self, real, rank):
        self.real = real
        if isinstance(rank, int):
            self.duals = [1] * rank
        else:
            self.duals = rank

    def __repr__(self):
        return (
            f"{self.real}+"
            f"{'+'.join(str(dual) + 'E' + str(n + 1) for n, dual in enumerate(self.duals))}"
        )

    def reduce(self):
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)

    def __add__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real + other, self.duals)

        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        if len(s_dual) > len(o_dual):
            o_dual.extend([1] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([1] * (len(o_dual) - len(s_dual)))

        new_duals = []
        for i in range(len(s_dual)):
            new_duals.append(s_dual[i] + o_dual[i])

        return Dual(self.real + other.real, new_duals)

    __radd__ = __add__

    def __sub__(self, other):
        return self + other * -1

    def __mul__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other)
            return Dual(self.real * other, new_duals)

        # polynomial-style convolution of the dual coefficients
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real

        return Dual(self.real * other.real, new_duals)

    __rmul__ = __mul__

    def __truediv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other)
            return Dual(self.real / other, new_duals)
        raise ValueError

    def __floordiv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other)
            return Dual(self.real // other, new_duals)
        raise ValueError

    def __pow__(self, n):
        if n < 0 or isinstance(n, float):
            raise ValueError("power must be a positive integer")
        if n == 0:
            return 1
        if n == 1:
            return self

        x = self
        for _ in range(n - 1):
            x *= self
        return x
def differentiate(func, position, order):
    """
    Differentiate a real-valued function at a point to the given order
    using forward-mode automatic differentiation via dual numbers.
    """
    if not callable(func):
        raise ValueError("differentiate() requires a function as input for func")
    if not isinstance(position, (float, int)):
        raise ValueError("differentiate() requires a float as input for position")
    if not isinstance(order, int):
        raise ValueError("differentiate() requires an int as input for order")
    d = Dual(position, 1)
    result = func(d)
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order)
if __name__ == "__main__":
import doctest
doctest.testmod()
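
    # Worked example: f(y) = y**2 * y**4 = y**6, so f''(y) = 30 * y**4 and
    # f''(9) = 30 * 9**4 = 196830, which is what differentiate(f, 9, 2) prints.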
    def f(y):
        return y**2 * y**4

    print(differentiate(f, 9, 2))
| 40
| 0
|
'''simple docstring'''
import fire
from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer


def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):
    """Save a randomly initialized version of a model using a pretrained config.
    Args:
        config_name: which config to use
        save_dir: where to save the resulting model and tokenizer
        config_kwargs: kwargs to pass to AutoConfig
    """
    cfg = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(cfg)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
return model
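

# Example invocation via fire (script name and paths are illustrative):
#   python save_randomly_initialized_model.py t5-small /tmp/t5-small-random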
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
| 716
|
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 40
| 0
|
'''simple docstring'''
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
'stable diffusion controlnet',
'0.22.0',
'Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.',
standard_warn=False,
stacklevel=3,
)
| 717
|
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )

        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
# Copied from tests.models.roberta.test_modeling_flax_roberta.FlaxRobertaModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True
    all_model_classes = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)
@slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
@slow
    def test_inference_masked_lm(self):
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)

        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 50265]
        self.assertEqual(list(output.shape), expected_shape)

        # compare the actual values for a slice.
        EXPECTED_SLICE = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], EXPECTED_SLICE, atol=1e-4))
    @slow
    def test_inference_no_head(self):
        model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)

        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        EXPECTED_SLICE = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], EXPECTED_SLICE, atol=1e-4))
| 40
| 0
|
'''simple docstring'''
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()

# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)

# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    "vocab_size": len(tokenizer),
    "scale_attn_by_inverse_layer_idx": True,
    "reorder_and_upcast_attn": True,
}

# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)

# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
| 718
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_instructblip': [
'INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'InstructBlipConfig',
'InstructBlipQFormerConfig',
'InstructBlipVisionConfig',
],
'processing_instructblip': ['InstructBlipProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_instructblip"] = [
'INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'InstructBlipQFormerModel',
'InstructBlipPreTrainedModel',
'InstructBlipForConditionalGeneration',
'InstructBlipVisionModel',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 40
| 0
|
'''simple docstring'''
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def create_components():
    model = torch.nn.Linear(2, 4)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)
    scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=2, epochs=1)
    train_dl = DataLoader(TensorDataset(torch.tensor([1, 2, 3])))
    valid_dl = DataLoader(TensorDataset(torch.tensor([4, 5, 6])))

    return model, optimizer, scheduler, train_dl, valid_dl


def get_signature(model):
    return (model.weight.abs().sum() + model.bias.abs().sum()).item()


def load_random_weights(model):
    state = torch.nn.Linear(*tuple(model.weight.T.shape)).state_dict()
    model.load_state_dict(state)
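

# Note: get_signature() is a cheap scalar fingerprint of the Linear layer's
# parameters; it changes whenever the weights change, which is all the
# save/load round-trip tests below need in order to compare checkpoints.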
class AcceleratorTester(AccelerateTestCase):
@require_cuda
    def test_accelerator_can_be_reinstantiated(self):
        _accelerator = Accelerator()
        assert PartialState._shared_state["_cpu"] is False
        assert PartialState._shared_state["device"].type == "cuda"
        with self.assertRaises(ValueError):
            _ = Accelerator(cpu=True)
    def test_mutable_states(self):
        accelerator = Accelerator()
        state = GradientState()
        assert state.num_steps == 1
        accelerator.gradient_accumulation_steps = 4
        assert state.num_steps == 4

        assert state.sync_gradients is True
        accelerator.sync_gradients = False
        assert state.sync_gradients is False
        GradientState._reset_state()
    def test_prepared_objects_are_referenced(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()

        (
            prepared_model,
            prepared_optimizer,
            prepared_scheduler,
            prepared_train_dl,
            prepared_valid_dl,
        ) = accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)
self.assertTrue(prepared_model in accelerator._models )
self.assertTrue(prepared_optimizer in accelerator._optimizers )
self.assertTrue(prepared_scheduler in accelerator._schedulers )
self.assertTrue(prepared_train_dl in accelerator._dataloaders )
self.assertTrue(prepared_valid_dl in accelerator._dataloaders )
    def test_free_memory_dereferences_prepared_components(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)
        accelerator.free_memory()

        self.assertTrue(len(accelerator._models) == 0)
        self.assertTrue(len(accelerator._optimizers) == 0)
        self.assertTrue(len(accelerator._schedulers) == 0)
        self.assertTrue(len(accelerator._dataloaders) == 0)
    def test_env_var_device(self):
        PartialState._reset_state()

        # Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
        def noop(*args, **kwargs):
            pass

        with patch("torch.cuda.set_device", noop), patch_environment(ACCELERATE_TORCH_DEVICE="cuda:64"):
            accelerator = Accelerator()
            self.assertEqual(str(accelerator.state.device), "cuda:64")
    def test_save_load_model(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)

        model_signature = get_signature(model)

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # make sure loaded weights match
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)
    def test_save_load_model_with_hooks(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)

        model_signature = get_signature(model)

        # saving hook
        def save_config(models, weights, output_dir):
            config = {"class_name": models[0].__class__.__name__}

            with open(os.path.join(output_dir, "data.json"), "w") as f:
                json.dump(config, f)

        # loading hook
        def load_config(models, input_dir):
            with open(os.path.join(input_dir, "data.json"), "r") as f:
                config = json.load(f)

            models[0].class_name = config["class_name"]

        save_hook = accelerator.register_save_state_pre_hook(save_config)
        load_hook = accelerator.register_load_state_pre_hook(load_config)

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match with hooks
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # random class name to verify correct one is loaded
            model.class_name = "random"

            # make sure loaded weights match with hooks
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)

            # model.class_name is loaded from config
            self.assertTrue(model.class_name == model.__class__.__name__)

        # remove hooks
        save_hook.remove()
        load_hook.remove()

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match with hooks removed
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # random class name to verify correct one is loaded
            model.class_name = "random"

            # make sure loaded weights match with hooks removed
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)

            # model.class_name is NOT loaded from config
            self.assertTrue(model.class_name != model.__class__.__name__)
    def test_accelerator_none(self):
        """Just test that passing None to accelerator.prepare() works."""
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        dummy_obj = None

        # This should work
        model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare(
            model, optimizer, scheduler, train_dl, valid_dl, dummy_obj
        )
        self.assertTrue(dummy_obj is None)
    def test_is_accelerator_prepared(self):
        """Checks that `_is_accelerate_prepared` is set properly"""
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        dummy_obj = [1, 2, 3]

        # This should work
        model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare(
            model, optimizer, scheduler, train_dl, valid_dl, dummy_obj
        )
        self.assertEqual(
            getattr(dummy_obj, "_is_accelerate_prepared", False), False, "Dummy object should not have `_is_accelerate_prepared` set"
        )
        self.assertEqual(
            getattr(model, "_is_accelerate_prepared", False), True, "Model is missing `_is_accelerator_prepared` or is set to `False`"
        )
        self.assertEqual(
            getattr(optimizer, "_is_accelerate_prepared", False), True, "Optimizer is missing `_is_accelerator_prepared` or is set to `False`"
        )
        self.assertEqual(
            getattr(scheduler, "_is_accelerate_prepared", False), True, "Scheduler is missing `_is_accelerator_prepared` or is set to `False`"
        )
        self.assertEqual(
            getattr(train_dl, "_is_accelerate_prepared", False), True, "Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`"
        )
        self.assertEqual(
            getattr(valid_dl, "_is_accelerate_prepared", False), True, "Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`"
        )
@slow
@require_bnb
    def test_accelerator_bnb(self):
        """Tests that the accelerator can be used with the BNB library."""
        from transformers import AutoModelForCausalLM

        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m", load_in_8bit=True, device_map={"": 0}, )
        accelerator = Accelerator()

        # This should work
        model = accelerator.prepare(model)
@slow
@require_bnb
    def test_accelerator_bnb_cpu_error(self):
        """Tests that a ValueError is raised when the model is dispatched between CPU and GPU."""
        from transformers import AutoModelForCausalLM

        accelerator = Accelerator()
        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m", )
        model.tie_weights()
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = "cpu"
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m", device_map=device_map, load_in_8bit=True, llm_int8_enable_fp32_cpu_offload=True)

        # This should not work and get value error
        with self.assertRaises(ValueError):
            model = accelerator.prepare(model)
@slow
@require_bnb
@require_multi_gpu
    def test_accelerator_bnb_multi_gpu(self):
        """Tests that a ValueError is raised for 8-bit models dispatched over multiple GPUs under distributed training."""
        from transformers import AutoModelForCausalLM

        PartialState._shared_state = {"distributed_type": DistributedType.MULTI_GPU}
        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m", )
        model.tie_weights()
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = 1
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m", load_in_8bit=True, device_map=device_map, )
        accelerator = Accelerator()

        # This should not work and get value error
        with self.assertRaises(ValueError):
            _ = accelerator.prepare(model)

        PartialState._reset_state()
@slow
@require_bnb
@require_multi_gpu
    def test_accelerator_bnb_multi_gpu_no_distributed(self):
        """Tests that an 8-bit model dispatched over multiple GPUs can be prepared outside distributed training."""
        from transformers import AutoModelForCausalLM

        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m", )
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = 1
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m", load_in_8bit=True, device_map=device_map, )
        accelerator = Accelerator()

        # This should work
        _ = accelerator.prepare(model)
@require_cuda
    def test_accelerator_cpu_flag_prepare(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
        accelerator = Accelerator(cpu=True)
        _ = accelerator.prepare(model)
| 719
|
'''simple docstring'''
import os
def solution():
    """
    Returns the first ten digits of the sum of the numbers
    read from the file num.txt.
    """
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]
if __name__ == "__main__":
print(solution())
| 40
| 0
|
'''simple docstring'''
def decimal_to_binary(num: int) -> str:
    """
    Convert an integer decimal number to a binary string, e.g. '0b101'.
    """
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")

    if num == 0:
        return "0b0"

    negative = False

    if num < 0:
        negative = True
        num = -num

    binary: list[int] = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1

    if negative:
        return "-0b" + "".join(str(e) for e in binary)

    return "0b" + "".join(str(e) for e in binary)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 720
|
'''simple docstring'''
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class TokenizedDataset(IterableDataset):
    """Tokenize and preprocess the dataset.
    Multiple copies of the same prompt are sent sequentially; see complete_code for details."""

    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generate commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors="pt")
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }
class EndOfFunctionCriteria(StoppingCriteria):
    """Custom `StoppingCriteria` which checks if all generated functions in the batch are completed."""

    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        """Returns true if all generated sequences contain any of the end-of-function strings."""
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)
def remove_last_block(string):
    """Remove the last block of the code containing EOF_STRINGS."""
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    # last string should be ""
    return "".join(string_list[:-2])
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    """Generate multiple completions for each task in the dataset."""
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs
            )
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
            )
            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()

            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)

    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens
def main():
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()

    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"

    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()

    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)

    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)

    # Generation settings
    gen_kwargs = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }

    # Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval")
    code_eval_metric = load_metric("code_eval")

    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"])
    n_copies = args.n_samples // args.batch_size

    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["test"], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)

    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""], predictions=[[""]])
    except ValueError as exception:
        print(
            'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
            " flag to enable code evaluation."
        )
        raise exception

    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)

    generations = complete_code(
        accelerator, model, tokenizer, human_eval_loader, n_tasks=n_tasks, batch_size=args.batch_size, **gen_kwargs,
    )

    if accelerator.is_main_process:
        references = []

        for task in tqdm(range(n_tasks)):
            test_func = human_eval["test"][task]["test"]
            entry_point = f"""check({human_eval['test'][task]['entry_point']})"""
            references.append("\n" + test_func + "\n" + entry_point)

        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=generations, num_workers=args.num_workers
        )
        print(f"""Results: {pass_at_k}""")

        # Save results to json file
        with open(args.output_file, "w") as fp:
            json.dump(pass_at_k, fp)
# For some reason the following seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 40
| 0
|
'''simple docstring'''
def find_minimum_change(denominations, value):
    total_value = int(value)

    # Initialize Result
    answer = []

    # Traverse through all denomination
    for denomination in reversed(denominations):
        # Find denominations
        while int(total_value) >= int(denomination):
            total_value -= int(denomination)
            answer.append(denomination)  # Append to the "answer" array

    return answer
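

# Worked example: find_minimum_change([1, 2, 5, 10], "27") greedily takes the
# largest denominations first and returns [10, 10, 5, 2].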
# Driver Code
if __name__ == "__main__":
lowerCamelCase__ = []
lowerCamelCase__ = '0'
if (
input('Do you want to enter your denominations ? (yY/n): ').strip().lower()
== "y"
):
lowerCamelCase__ = int(input('Enter the number of denominations you want to add: ').strip())
for i in range(0, n):
denominations.append(int(input(F'''Denomination {i}: ''').strip()))
lowerCamelCase__ = input('Enter the change you want to make in Indian Currency: ').strip()
else:
# All denominations of Indian Currency if user does not enter
lowerCamelCase__ = [1, 2, 5, 10, 20, 50, 100, 500, 2_000]
lowerCamelCase__ = input('Enter the change you want to make: ').strip()
if int(value) == 0 or int(value) < 0:
print('The total value cannot be zero or negative.')
else:
print(F'''Following is minimal change for {value}: ''')
lowerCamelCase__ = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=' ')
| 721
|
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 40
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
    "tokenization_m2m_100": ["M2M100Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_m2m_100"] = [
        "M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
        "M2M100ForConditionalGeneration",
        "M2M100Model",
        "M2M100PreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 41
|
'''simple docstring'''
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UNetBlockTesterMixin:
    @property
    def dummy_input(self):
        return self.get_dummy_input()

    @property
    def output_shape(self):
        if self.block_type == "down":
            return (4, 32, 16, 16)
        elif self.block_type == "mid":
            return (4, 32, 32, 32)
        elif self.block_type == "up":
            return (4, 32, 64, 64)

        raise ValueError(f"""'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.""")
    def get_dummy_input(
        self,
        include_temb=True,
        include_res_hidden_states_tuple=False,
        include_encoder_hidden_states=False,
        include_skip_sample=False,
    ):
        batch_size = 4
        num_channels = 32
        sizes = (32, 32)

        generator = torch.manual_seed(0)
        device = torch.device(torch_device)
        shape = (batch_size, num_channels) + sizes
        hidden_states = randn_tensor(shape, generator=generator, device=device)
        dummy_input = {"hidden_states": hidden_states}

        if include_temb:
            temb_channels = 128
            dummy_input["temb"] = randn_tensor((batch_size, temb_channels), generator=generator, device=device)

        if include_res_hidden_states_tuple:
            generator_1 = torch.manual_seed(1)
            dummy_input["res_hidden_states_tuple"] = (randn_tensor(shape, generator=generator_1, device=device),)

        if include_encoder_hidden_states:
            dummy_input["encoder_hidden_states"] = floats_tensor((batch_size, 32, 32)).to(device)

        if include_skip_sample:
            dummy_input["skip_sample"] = randn_tensor(((batch_size, 3) + sizes), generator=generator, device=device)

        return dummy_input
    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
            "temb_channels": 128,
        }
        if self.block_type == "up":
            init_dict["prev_output_channel"] = 32

        if self.block_type == "mid":
            init_dict.pop("out_channels")

        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_output(self, expected_slice):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        unet_block = self.block_class(**init_dict)
        unet_block.to(torch_device)
        unet_block.eval()

        with torch.no_grad():
            output = unet_block(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        self.assertEqual(output.shape, self.output_shape)

        output_slice = output[0, -1, -3:, -3:]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        assert torch_all_close(output_slice.flatten(), expected_slice, atol=5e-3)
@unittest.skipIf(torch_device == "mps" , "Training is not supported in mps")
def lowercase_ ( self) -> Optional[Any]:
"""simple docstring"""
a_ , a_ =self.prepare_init_args_and_inputs_for_common()
a_ =self.block_class(**lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.train()
a_ =model(**lowerCAmelCase_)
if isinstance(lowerCAmelCase_ , lowerCAmelCase_):
a_ =output[0]
a_ =torch.device(lowerCAmelCase_)
a_ =randn_tensor(output.shape , device=lowerCAmelCase_)
a_ =torch.nn.functional.mse_loss(lowerCAmelCase_ , lowerCAmelCase_)
loss.backward()
| 41
| 1
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mra"] = [
        "MRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MraForMaskedLM",
        "MraForMultipleChoice",
        "MraForQuestionAnswering",
        "MraForSequenceClassification",
        "MraForTokenClassification",
        "MraLayer",
        "MraModel",
        "MraPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 41
|
'''simple docstring'''
from __future__ import annotations
def print_distance(distance, src):
    print(f"""Vertex\tShortest Distance from vertex {src}""")
    for i, d in enumerate(distance):
        print(f"""{i}\t\t{d}""")


def check_negative_cycle(graph, distance, edge_count):
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            return True
    return False


def bellman_ford(graph, vertex_count, edge_count, src):
    """
    Returns the shortest distances from the vertex src to all other vertices.
    """
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0

    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w

    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception("Negative cycle found")

    return distance
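

# Minimal programmatic example (bypassing the interactive driver below):
#   graph = [{"src": 0, "dst": 1, "weight": 5}, {"src": 1, "dst": 2, "weight": -2}]
#   bellman_ford(graph, vertex_count=3, edge_count=2, src=0) -> [0.0, 5.0, 3.0]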
if __name__ == "__main__":
import doctest
doctest.testmod()
    V = int(input("Enter number of vertices: ").strip())
    E = int(input("Enter number of edges: ").strip())

    graph = [{} for _ in range(E)]

    for i in range(E):
        print("Edge ", i + 1)
        src, dest, weight = (
            int(x)
            for x in input("Enter source, destination, weight: ").strip().split(" ")
        )
        graph[i] = {"src": src, "dst": dest, "weight": weight}

    source = int(input("\nEnter shortest path source:").strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, 0)
| 41
| 1
|
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
lowercase = logging.getLogger(__name__)
def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
" --overwrite_output_dir to overcome." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1), training_args.fp16, )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s" , lowercase__ )
# Set seed
set_seed(training_args.seed )
    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, )
# Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir, tokenizer=tokenizer, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.train, )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir, tokenizer=tokenizer, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.dev, )
        if training_args.do_eval
        else None
    )
    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}
    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=compute_metrics, data_collator=data_collator, )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

                results.update(result)
return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 41
|
'''simple docstring'''
import torch
from diffusers import StableDiffusionPipeline
model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

prompt = "A photo of sks dog in a bucket"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]

image.save("dog-bucket.png")
| 41
| 1
|
'''simple docstring'''
from PIL import Image
def change_contrast(img: Image, level: int) -> Image:
    """
    Change the contrast of an image by the given level.
    """
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        """
        Fundamental transformation applied to every pixel value.
        """
        return int(128 + factor * (c - 128))

    return img.point(contrast)
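

# Illustrative numbers: level=170 gives factor = (259 * 425) / (255 * 89) ≈ 4.85,
# so mid-gray (128) is unchanged while values away from 128 are pushed apart
# (PIL's point() clamps the results to the valid 0-255 range).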
if __name__ == "__main__":
# Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change contrast to 170
        cont_img = change_contrast(img, 170)
cont_img.save('''image_data/lena_high_contrast.png''', format='''png''')
| 41
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def fill_mask(masked_input, model, tokenizer, topk=5):
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))]
    )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
    return topk_filled_outputs


tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()

masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
'''simple docstring'''
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt",
    },
    "emoji_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "abeja/gpt-neox-japanese-2.7b": 2048,
}


def load_vocab_and_emoji(vocab_file, emoji_file):
    """Loads a vocabulary file and an emoji file into dictionaries."""
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())

    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b)] = idx
        for wd in b:
            vocab[wd] = idx

    return vocab, raw_vocab, ids_to_tokens, emoji
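# Illustrative vocab line format (a hypothetical example inferred from the parsing above,
# not taken from the real vocab file): each line is either a single token or a
# comma-separated group of surface variants that share one id, e.g. a line "ネコ,ねこ"
# would map both spellings to the same token id.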
class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer):
    """
    Tokenizer for GPT-NeoX-Japanese, based on a sub-word tokenization that can handle spelling
    variants of Japanese words and falls back to UTF-8 byte tokens for unknown characters.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        emoji_file,
        unk_token="<|endoftext|>",
        pad_token="<|endoftext|>",
        bos_token="<|startoftext|>",
        eos_token="<|endoftext|>",
        do_clean_text=False,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            pad_token=pad_token,
            bos_token=bos_token,
            eos_token=eos_token,
            do_clean_text=do_clean_text,
            **kwargs,
        )
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
                " model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f"Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"
                " pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji
        )
    @property
    def vocab_size(self):
        # self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab
        return len(self.raw_vocab)

    def get_vocab(self):
        return dict(self.raw_vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.subword_tokenizer.convert_id_to_token(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        out_string = "".join(tokens).strip()
        return out_string
    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """This corresponds to DialoGPT variants of models."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"]
            )
        else:
            vocab_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
            )
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(",".join(token) + "\n")
                index += 1
        with open(emoji_file, "w", encoding="utf-8") as writer:
            json.dump(self.emoji, writer)
        return vocab_file, emoji_file
class SubWordJapaneseTokenizer(object):
    """
    Sub-word tokenizer for Japanese that normalizes URLs, emails, phone numbers, dates and
    prices, handles emoji, and uses UTF-8 byte tokens as a fallback for unknown characters.
    """

    def __init__(self, vocab, ids_to_tokens, emoji):
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
        self.content_repatter1 = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
        self.content_repatter2 = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
        self.content_repatter3 = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
        self.content_repatter4 = re.compile(
            r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter5 = re.compile(
            r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter6 = re.compile(
            r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*"
        )
        keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
        blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
        self.content_trans1 = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})
    def __len__(self):
        return len(self.ids_to_tokens)

    def clean_text(self, content):
        content = self.content_repatter1.sub("<URL>", content)
        content = self.content_repatter2.sub("<EMAIL>", content)
        content = self.content_repatter3.sub("<TEL>", content)
        content = self.content_repatter4.sub("<DATE>", content)
        content = self.content_repatter5.sub("<DATE>", content)
        content = self.content_repatter6.sub("<PRICE>", content)
        content = content.translate(self.content_trans1)
        while "<BLOCK><BLOCK>" in content:
            content = content.replace("<BLOCK><BLOCK>", "<BLOCK>")
        return content
    def tokenize(self, text, clean=False):
        text = text.replace(" ", "<SP>")
        text = text.replace("　", "<SP>")  # full-width space
        text = text.replace("\r\n", "<BR>")
        text = text.replace("\n", "<BR>")
        text = text.replace("\r", "<BR>")
        text = text.replace("\t", "<TAB>")
        text = text.replace("—", "ー")
        text = text.replace("−", "ー")
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k, v)
        if clean:
            text = self.clean_text(text)

        def check_simbol(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 2:
                c = (int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0xC2A1 and c <= 0xC2BF)
                    or (c >= 0xC780 and c <= 0xC783)
                    or (c >= 0xCAB9 and c <= 0xCBBF)
                    or (c >= 0xCC80 and c <= 0xCDA2)
                ):
                    return True
            return False

        def checku2e(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 3:
                c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
                if c >= 0xE28080 and c <= 0xE2B07F:
                    return True
            return False

        pos = 0
        result = []
        while pos < len(text):
            end = min(len(text), pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end, pos, -1):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(candidates) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates, key=lambda x: x[0])[0]
                result.append(wd)
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd):
                    result.append("<KIGOU>")
                elif checku2e(wd):
                    result.append("<U2000U2BFF>")
                else:
                    for i in wd.encode("utf-8"):
                        result.append("<|byte%d|>" % i)
                pos = end
        return result
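    # Illustrative behaviour note (an assumption drawn from the code above, not upstream
    # documentation): vocabulary entries are matched greedily from the current position,
    # the candidate with the smallest token id wins among same-position matches, and
    # anything unmatched falls back to UTF-8 byte tokens such as "<|byte227|>", so
    # tokenization never fails on unseen input.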
    def convert_id_to_token(self, index, breakline="\n"):
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2]))
        else:
            if len(byte_tokens) > 0:
                words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji["emoji_inv"][word])
            elif word == "<SP>":
                words.append(" ")
            elif word == "<BR>":
                words.append(breakline)
            elif word == "<TAB>":
                words.append("\t")
            elif word == "<BLOCK>":
                words.append("▀")
            elif word == "<KIGOU>":
                words.append("ǀ")
            elif word == "<U2000U2BFF>":
                words.append("‖")
            else:
                words.append(word)
        if len(byte_tokens) > 0:
            words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
        text = "".join(words)
        return text
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/trocr-base-handwritten": (
        "https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
    ),
    # See all TrOCR models at https://huggingface.co/models?filter=trocr
}


class TrOCRConfig(PretrainedConfig):
    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(
        self,
        vocab_size=50265,
        d_model=1024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.02,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
'''simple docstring'''
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}

CONFIG_MAP = {
'''b0''': {
'''hidden_dim''': 1_280,
'''width_coef''': 1.0,
'''depth_coef''': 1.0,
'''image_size''': 224,
'''dropout_rate''': 0.2,
'''dw_padding''': [],
},
'''b1''': {
'''hidden_dim''': 1_280,
'''width_coef''': 1.0,
'''depth_coef''': 1.1,
'''image_size''': 240,
'''dropout_rate''': 0.2,
'''dw_padding''': [16],
},
'''b2''': {
'''hidden_dim''': 1_408,
'''width_coef''': 1.1,
'''depth_coef''': 1.2,
'''image_size''': 260,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 8, 16],
},
'''b3''': {
'''hidden_dim''': 1_536,
'''width_coef''': 1.2,
'''depth_coef''': 1.4,
'''image_size''': 300,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 18],
},
'''b4''': {
'''hidden_dim''': 1_792,
'''width_coef''': 1.4,
'''depth_coef''': 1.8,
'''image_size''': 380,
'''dropout_rate''': 0.4,
'''dw_padding''': [6],
},
'''b5''': {
'''hidden_dim''': 2_048,
'''width_coef''': 1.6,
'''depth_coef''': 2.2,
'''image_size''': 456,
'''dropout_rate''': 0.4,
'''dw_padding''': [13, 27],
},
'''b6''': {
'''hidden_dim''': 2_304,
'''width_coef''': 1.8,
'''depth_coef''': 2.6,
'''image_size''': 528,
'''dropout_rate''': 0.5,
'''dw_padding''': [31],
},
'''b7''': {
'''hidden_dim''': 2_560,
'''width_coef''': 2.0,
'''depth_coef''': 3.1,
'''image_size''': 600,
'''dropout_rate''': 0.5,
'''dw_padding''': [18],
},
}
def get_efficientnet_config(model_name):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config


# We will verify our results on a standard COCO image of cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


def convert_image_processor(model_name):
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size},
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.47853944, 0.4732864, 0.47434163],
        do_center_crop=False,
    )
    return preprocessor


# here we list all keys to be renamed (original TF name on the left, our name on the right)
def rename_keys(original_param_names):
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight") )
rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") )
rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") )
rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") )
rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") )
for b in block_names:
a_ =block_name_mapping[b]
rename_keys.append((F"""block{b}_expand_conv/kernel:0""", F"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") )
rename_keys.append((F"""block{b}_expand_bn/gamma:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") )
rename_keys.append((F"""block{b}_expand_bn/beta:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") )
rename_keys.append(
(F"""block{b}_dwconv/depthwise_kernel:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") )
rename_keys.append((F"""block{b}_bn/gamma:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") )
rename_keys.append((F"""block{b}_bn/beta:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") )
rename_keys.append(
(F"""block{b}_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") )
rename_keys.append(
(F"""block{b}_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") )
rename_keys.append((F"""block{b}_se_reduce/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") )
rename_keys.append((F"""block{b}_se_reduce/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") )
rename_keys.append((F"""block{b}_se_expand/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") )
rename_keys.append((F"""block{b}_se_expand/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") )
rename_keys.append(
(F"""block{b}_project_conv/kernel:0""", F"""encoder.blocks.{hf_b}.projection.project_conv.weight""") )
rename_keys.append((F"""block{b}_project_bn/gamma:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.weight""") )
rename_keys.append((F"""block{b}_project_bn/beta:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.bias""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") )
rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") )
rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") )
rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") )
rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") )
rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]

    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue

        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)

        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    """
    Copy/paste/tweak model's weights to our EfficientNet structure.
    """
    # Load the original TF model
    original_model = model_classes[model_name](
        include_top=True,
        weights="imagenet",
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1000,
        classifier_activation="softmax",
    )

    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''b0''',
type=str,
help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''hf_model''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''')
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
    args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
'''simple docstring'''
from __future__ import annotations
from PIL import Image
# Define glider example
GLIDER = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def new_generation(cells: list[list[int]]) -> list[list[int]]:
    """
    Generates the next generation for a given state of Conway's Game of Life.
    """
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]

            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (alive and 2 <= neighbour_count <= 3) or (not alive and neighbour_count == 3):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)

        next_generation.append(next_generation_row)
    return next_generation
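# Quick sanity check (illustrative): a vertical blinker flips to horizontal in one step:
# >>> new_generation([[0, 1, 0], [0, 1, 0], [0, 1, 0]])
# [[0, 0, 0], [1, 1, 1], [0, 0, 0]]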
def generate_images(cells: list[list[int]], frames: int) -> list[Image.Image]:
    """
    Generates a list of images of subsequent Game of Life states.
    """
    images = []
    for _ in range(frames):
        # Create output image
        img = Image.new("RGB", (len(cells[0]), len(cells)))
        pixels = img.load()

        # Save cells to image
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)

        # Save image
        images.append(img)
        cells = new_generation(cells)
    return images
if __name__ == "__main__":
    images = generate_images(GLIDER, 16)
images[0].save('''out.gif''', save_all=True, append_images=images[1:])
'''simple docstring'''
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
'''simple docstring'''
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_attributes.py
PATH_TO_TRANSFORMERS = "src/transformers"


# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

SPECIAL_CASES_TO_ALLOW = {
# used to compute the property `self.chunk_length`
'''EncodecConfig''': ['''overlap'''],
# used as `self.bert_model = BertModel(config, ...)`
'''DPRConfig''': True,
# not used in modeling files, but it's an important information
'''FSMTConfig''': ['''langs'''],
# used internally in the configuration class file
'''GPTNeoConfig''': ['''attention_types'''],
# used internally in the configuration class file
'''EsmConfig''': ['''is_folding_model'''],
# used during training (despite we don't have training script for these models yet)
'''Mask2FormerConfig''': ['''ignore_value'''],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
'''OneFormerConfig''': ['''ignore_value''', '''norm'''],
# used during preprocessing and collation, see `collating_graphormer.py`
'''GraphormerConfig''': ['''spatial_pos_max'''],
# used internally in the configuration class file
'''T5Config''': ['''feed_forward_proj'''],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
'''MT5Config''': ['''feed_forward_proj''', '''tokenizer_class'''],
'''UMT5Config''': ['''feed_forward_proj''', '''tokenizer_class'''],
# used internally in the configuration class file
'''LongT5Config''': ['''feed_forward_proj'''],
# used internally in the configuration class file
'''SwitchTransformersConfig''': ['''feed_forward_proj'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''BioGptConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''GLPNConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''SegformerConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''CvtConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''PerceiverConfig''': ['''layer_norm_eps'''],
# used internally to calculate the feature size
'''InformerConfig''': ['''num_static_real_features''', '''num_time_features'''],
# used internally to calculate the feature size
'''TimeSeriesTransformerConfig''': ['''num_static_real_features''', '''num_time_features'''],
# used internally to calculate the feature size
'''AutoformerConfig''': ['''num_static_real_features''', '''num_time_features'''],
# used internally to calculate `mlp_dim`
'''SamVisionConfig''': ['''mlp_ratio'''],
# For (head) training, but so far not implemented
'''ClapAudioConfig''': ['''num_classes'''],
# Not used, but providing useful information to users
'''SpeechT5HifiGanConfig''': ['''sampling_rate'''],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
'''CLIPSegConfig''': True,
'''DeformableDetrConfig''': True,
'''DetaConfig''': True,
'''DinatConfig''': True,
'''DonutSwinConfig''': True,
'''EfficientFormerConfig''': True,
'''FSMTConfig''': True,
'''JukeboxConfig''': True,
'''LayoutLMv2Config''': True,
'''MaskFormerSwinConfig''': True,
'''MT5Config''': True,
'''NatConfig''': True,
'''OneFormerConfig''': True,
'''PerceiverConfig''': True,
'''RagConfig''': True,
'''SpeechT5Config''': True,
'''SwinConfig''': True,
'''Swin2SRConfig''': True,
'''Swinv2Config''': True,
'''SwitchTransformersConfig''': True,
'''TableTransformerConfig''': True,
'''TapasConfig''': True,
'''TransfoXLConfig''': True,
'''UniSpeechConfig''': True,
'''UniSpeechSatConfig''': True,
'''WavLMConfig''': True,
'''WhisperConfig''': True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
'''JukeboxPriorConfig''': True,
# TODO: @Younes (for `is_decoder`)
'''Pix2StructTextConfig''': True,
}
)
def check_attribute_being_used(config_class, attributes, default_value, source_strings):
    """Check whether any name in `attributes` is used in one of the modeling source strings."""
    attribute_used = False
    for attribute in attributes:
        for modeling_source in source_strings:
            # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
            if (
                f"config.{attribute}" in modeling_source
                or f'getattr(config, "{attribute}"' in modeling_source
                or f'getattr(self.config, "{attribute}"' in modeling_source
            ):
                attribute_used = True
            # Deal with multi-line cases
            elif (
                re.search(
                    rf'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"',
                    modeling_source,
                )
                is not None
            ):
                attribute_used = True
            # `SequenceSummary` is called with `SequenceSummary(config)`
            elif attribute in [
                "summary_type",
                "summary_use_proj",
                "summary_activation",
                "summary_last_dropout",
                "summary_proj_to_labels",
                "summary_first_dropout",
            ]:
                if "SequenceSummary" in modeling_source:
                    attribute_used = True
            if attribute_used:
                break
        if attribute_used:
            break

    # common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
        "bos_index",
        "eos_index",
        "pad_index",
        "unk_index",
        "mask_index",
        "image_size",
        "use_cache",
        "out_features",
        "out_indices",
    ]
    attributes_used_in_generation = ["encoder_no_repeat_ngram_size"]

    # Special cases to be allowed
    case_allowed = True
    if not attribute_used:
        case_allowed = False
        for attribute in attributes:
            # Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
            if attribute in ["is_encoder_decoder"] and default_value is True:
                case_allowed = True
            elif attribute in ["tie_word_embeddings"] and default_value is False:
                case_allowed = True

            # Allow cases without checking the default value in the configuration class
            elif attribute in attributes_to_allow + attributes_used_in_generation:
                case_allowed = True
            elif attribute.endswith("_token_id"):
                case_allowed = True

            # configuration class specific cases
            if not case_allowed:
                allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__, [])
                case_allowed = allowed_cases is True or attribute in allowed_cases

    return attribute_used or case_allowed
def check_config_attributes_being_used(config_class):
    """Return the sorted list of `__init__` parameters of `config_class` that are unused in the modeling files."""
    signature = dict(inspect.signature(config_class.__init__).parameters)
    parameter_names = [x for x in list(signature.keys()) if x not in ["self", "kwargs"]]
    parameter_defaults = [signature[param].default for param in parameter_names]

    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}

    # Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class)
    model_dir = os.path.dirname(config_source_file)
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir, fn) for fn in os.listdir(model_dir) if fn.startswith("modeling_")]

    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path):
            with open(path) as fp:
                modeling_sources.append(fp.read())

    unused_attributes = []
    for config_param, default_value in zip(parameter_names, parameter_defaults):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param])

        if not check_attribute_being_used(config_class, attributes, default_value, modeling_sources):
            unused_attributes.append(attributes[0])

    return sorted(unused_attributes)
def check_config_attributes():
    """Raise if any non-deprecated configuration class has attributes unused in the modeling files."""
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class),
                lambda x: inspect.isclass(x)
                and issubclass(x, PretrainedConfig)
                and inspect.getmodule(x) == inspect.getmodule(_config_class),
            )
        ]
        for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class)
            if len(unused_attributes) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes

    if len(configs_with_unused_attributes) > 0:
        error = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
        for name, attributes in configs_with_unused_attributes.items():
            error += f"{name}: {attributes}\n"
        raise ValueError(error)
if __name__ == "__main__":
check_config_attributes()
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_timesformer''': ['''TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimesformerConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timesformer"] = [
'''TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimesformerModel''',
'''TimesformerForVideoClassification''',
'''TimesformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
def min_path_sum(grid: list) -> int:
    """
    Find the path from top left to bottom right of an array of numbers with the
    lowest possible sum, moving only right or down.
    """
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")

    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]

    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]

    return grid[-1][-1]


def fill_row(current_row: list, row_above: list) -> list:
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])

    return current_row
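# Worked example (illustrative): for the grid [[1, 3, 1], [1, 5, 1], [4, 2, 1]] the first
# row accumulates to [1, 4, 5], the second fills to [2, 7, 6] and the third to [6, 8, 7],
# so min_path_sum returns 7 (path 1 -> 3 -> 1 -> 1 -> 1).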
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
from collections.abc import Generator
def fibonacci_generator() -> Generator[int, None, None]:
    """Generator that yields successive Fibonacci numbers, starting from 1."""
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def solution(n: int = 1000) -> int:
    """Returns the index of the first term in the Fibonacci sequence to contain n digits."""
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1
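# Quick check (illustrative): the first Fibonacci term with 3 digits is F(12) = 144,
# so solution(3) == 12.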
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}


class MvpConfig(PretrainedConfig):
    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50267,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        use_prompt=False,
        prompt_length=100,
        prompt_mid_dim=800,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json",
}


class SwitchTransformersConfig(PretrainedConfig):
    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=768,
        d_kv=64,
        d_ff=2048,
        expert_capacity=64,
        num_layers=12,
        num_sparse_encoder_layers=3,
        num_decoder_layers=12,
        num_sparse_decoder_layers=3,
        num_heads=12,
        num_experts=8,
        router_bias=False,
        router_jitter_noise=0.01,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        add_router_probs=False,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_sparse_encoder_layers = num_sparse_encoder_layers
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers

        # This tells us how many encoder layers go between two sparse layers.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers

        # This tells us how many decoder layers go between two sparse layers.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers

        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance

        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs

        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
'''simple docstring'''
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results


stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


@require_torch_tpu
class TorchXLAExamplesTests(TestCasePlus):
    def test_run_glue(self):
        import xla_spawn

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            ./examples/pytorch/text-classification/run_glue.py
            --num_cores=8
            ./examples/pytorch/text-classification/run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --do_train
            --do_eval
            --debug tpu_metrics_debug
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --max_steps=10
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
            """.split()

        with patch.object(sys, "argv", testargs):
            start = time()
            xla_spawn.main()
            end = time()

            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)

            # Assert that the script takes less than 500 seconds to make sure it doesn't hang.
            self.assertLess(end - start, 500)

    def test_trainer_tpu(self):
        import xla_spawn

        testargs = """
            ./tests/test_trainer_tpu.py
            --num_cores=8
            ./tests/test_trainer_tpu.py
            """.split()
        with patch.object(sys, "argv", testargs):
            xla_spawn.main()
'''simple docstring'''
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8

KEYMAP = {
    "tab": ord("\t"),
    "newline": ord("\r"),
    "esc": 27,
    "up": 65 + ARROW_KEY_FLAG,
    "down": 66 + ARROW_KEY_FLAG,
    "right": 67 + ARROW_KEY_FLAG,
    "left": 68 + ARROW_KEY_FLAG,
    "mod_int": 91,
    "undefined": sys.maxsize,
    "interrupt": 3,
    "insert": 50,
    "delete": 51,
    "pg_up": 53,
    "pg_down": 54,
}

KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]

if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
        b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
        b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
    }

for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    "Gets raw characters from inputs"
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                ch2 = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[ch2])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = ch2[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch
def get_character():
    "Gets a character from the keyboard and returns the key code"
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char

    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()

    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/config.json",
    "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/config.json",
    "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/config.json",
    "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json",
    "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/config.json",
    "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/config.json",
    "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/config.json",
    "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json",
}


class AlbertConfig(PretrainedConfig):
    model_type = "albert"

    def __init__(
        self,
        vocab_size=30000,
        embedding_size=128,
        hidden_size=4096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type


class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()


@unittest.skip("Temporarily disable the doc tests.")
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        ignore_files: Union[List[str], None] = None,
        n_identifier: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ):
        """
        Runs through the specific directory, checking all files with the given identifier.
        """
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier, (list, tuple)):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print("Testing", file)

            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module_identifier = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module_identifier)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f"{module_identifier} is not a module.")
            else:
                result = doctest.testfile(str(".." / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)

    def test_modeling_examples(self):
        transformers_directory = Path("src/transformers")
        files = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(transformers_directory, identifier=files, ignore_files=ignore_files)

    def test_tokenization_examples(self):
        transformers_directory = Path("src/transformers")
        files = "tokenization"
        self.analyze_directory(transformers_directory, identifier=files)

    def test_configuration_examples(self):
        transformers_directory = Path("src/transformers")
        files = "configuration"
        self.analyze_directory(transformers_directory, identifier=files)

    def test_remaining_examples(self):
        transformers_directory = Path("src/transformers")
        n_identifiers = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(transformers_directory, n_identifier=n_identifiers)

    def test_doc_sources(self):
        doc_source_directory = Path("docs/source")
        ignore_files = ["favicon.ico"]
        self.analyze_directory(doc_source_directory, ignore_files=ignore_files, only_modules=False)
'''simple docstring'''
from collections.abc import Sequence
def max_subsequence_sum(nums: Sequence[int] | None = None) -> int:
    """Return the maximum possible sum amongst all non-empty subsequences."""
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")

    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        ans = max(ans, ans + num, num)

    return ans
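# Quick checks (illustrative):
# max_subsequence_sum([1, 2, 3, 4, -2]) == 10   (sum of all the positive numbers)
# max_subsequence_sum([-2, -3, -1, -4, -6]) == -1   (all negative: best single element)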
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
    n = int(input("Enter number of elements : ").strip())
    array = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
print(max_subsequence_sum(array))
'''simple docstring'''
INSTALL_CONTENT = """
# Installazione di Transformers
! pip install transformers datasets
# Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e
# rimuovi la modalità commento al comando seguente.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
'''simple docstring'''
import os
from math import log10


def solution(data_file: str = "base_exp.txt") -> int:
    """
    Returns the line number of the base/exponent pair in `data_file` with the
    greatest numerical value (Project Euler problem 99).
    """
    largest = 0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        a, x = list(map(int, line.split(",")))
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1

    return result
if __name__ == "__main__":
print(solution())
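# The solution above (Project Euler 99 style) rests on the identity
# x**y > a**b  <=>  y*log10(x) > b*log10(a), which sidesteps computing huge
# powers. A tiny self-contained sketch of that comparison (the pairs are
# made up for illustration):
from math import log10

pairs = [(632382, 518061), (519432, 525806), (2, 7830457)]
best_line = max(range(len(pairs)), key=lambda i: pairs[i][1] * log10(pairs[i][0]))
print(best_line + 1)  # 1-based index of the pair with the largest base**exponent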
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
lowercase = '''\
@inproceedings{popovic-2015-chrf,
title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",
author = "Popovi{\'c}, Maja",
booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",
month = sep,
year = "2015",
address = "Lisbon, Portugal",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W15-3049",
doi = "10.18653/v1/W15-3049",
pages = "392--395",
}
@inproceedings{popovic-2017-chrf,
title = "chr{F}++: words helping character n-grams",
author = "Popovi{\'c}, Maja",
booktitle = "Proceedings of the Second Conference on Machine Translation",
month = sep,
year = "2017",
address = "Copenhagen, Denmark",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W17-4770",
doi = "10.18653/v1/W17-4770",
pages = "612--618",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
lowercase = '''\
ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,
and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation
that is already present in sacrebleu.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.
'''
lowercase = '''
Produces ChrF(++) scores for hypotheses given reference translations.
Args:
predictions (list of str): The predicted sentences.
references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.
char_order (int): Character n-gram order. Defaults to `6`.
word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.
beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.
lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.
whitespace (bool): If `True`, include whitespaces when extracting character n-grams.
eps_smoothing (bool): If `True`, applies epsilon smoothing similar
to reference chrF++.py, NLTK and Moses implementations. If `False`,
it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.
Returns:
\'score\' (float): The chrF (chrF++) score,
\'char_order\' (int): The character n-gram order,
\'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,
\'beta\' (int): Determine the importance of recall w.r.t precision
Examples:
Example 1--a simple example of calculating chrF:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction, references=reference)
>>> print(results)
{\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}
Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2)
>>> print(results)
{\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}
Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2,
... lowercase=True)
>>> print(results)
{\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class UpperCAmelCase ( datasets.Metric):
'''simple docstring'''
def lowercase_ ( self) -> Optional[Any]:
"""simple docstring"""
if version.parse(scb.__version__) < version.parse("1.4.12"):
raise ImportWarning(
"To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
"You can install it with `pip install \"sacrebleu>=1.4.12\"`.")
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/mjpost/sacreBLEU#chrf--chrf" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence"),
"references": datasets.Sequence(datasets.Value("string" , id="sequence") , id="references"),
}) , codebase_urls=["https://github.com/mjpost/sacreBLEU#chrf--chrf"] , reference_urls=[
"https://github.com/m-popovic/chrF",
] , )
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = CHRF.CHAR_ORDER , lowerCAmelCase_ = CHRF.WORD_ORDER , lowerCAmelCase_ = CHRF.BETA , lowerCAmelCase_ = False , lowerCAmelCase_ = False , lowerCAmelCase_ = False , ) -> str:
"""simple docstring"""
a_ =len(references[0])
if any(len(lowerCAmelCase_) != references_per_prediction for refs in references):
raise ValueError("Sacrebleu requires the same number of references for each prediction")
a_ =[[refs[i] for refs in references] for i in range(lowerCAmelCase_)]
a_ =CHRF(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
a_ =sb_chrf.corpus_score(lowerCAmelCase_ , lowerCAmelCase_)
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
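# A minimal sketch of the same computation done directly against sacrebleu's
# CHRF class (assuming sacrebleu>=1.4.12 is installed). The transposition
# mirrors what the _compute method above does, since sacrebleu expects one
# list per reference *position* rather than per prediction.
from sacrebleu import CHRF

predictions = ["The cat sat on the mat."]
references = [["The cat sat on the mat."]]  # one reference sub-list per prediction

transposed_refs = [[refs[i] for refs in references] for i in range(len(references[0]))]
chrf_pp = CHRF(word_order=2)  # word_order=2 turns chrF into chrF++
print(chrf_pp.corpus_score(predictions, transposed_refs).score)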
'''simple docstring'''
from __future__ import annotations
def UpperCAmelCase_ ( lowercase__ , lowercase__ ):
'''simple docstring'''
if b == 0:
return (1, 0)
((a_) , (a_)) =extended_euclid(lowercase__ , a % b )
a_ =a // b
return (y, x - k * y)
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
((a_) , (a_)) =extended_euclid(lowercase__ , lowercase__ )
a_ =na * na
a_ =ra * x * na + ra * y * na
return (n % m + m) % m
def UpperCAmelCase_ ( lowercase__ , lowercase__ ):
'''simple docstring'''
((a_) , (a_)) =extended_euclid(lowercase__ , lowercase__ )
if b < 0:
a_ =(b % n + n) % n
return b
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
a_ , a_ =invert_modulo(lowercase__ , lowercase__ ), invert_modulo(lowercase__ , lowercase__ )
a_ =na * na
a_ =ra * x * na + ra * y * na
return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name='''chinese_remainder_theorem''', verbose=True)
testmod(name='''chinese_remainder_theorem2''', verbose=True)
testmod(name='''invert_modulo''', verbose=True)
testmod(name='''extended_euclid''', verbose=True)
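# A worked example of the Chinese Remainder Theorem logic above, written out
# with descriptive names (a sketch; `crt_pair` is a hypothetical helper, not
# part of the module). Find x with x = 1 (mod 5) and x = 2 (mod 7):
def crt_pair(r1: int, m1: int, r2: int, m2: int) -> int:
    """Smallest non-negative x with x % m1 == r1 and x % m2 == r2 (m1, m2 coprime)."""
    inv = pow(m1, -1, m2)  # modular inverse of m1 modulo m2 (Python 3.8+)
    return (r1 + m1 * ((r2 - r1) * inv % m2)) % (m1 * m2)


assert crt_pair(1, 5, 2, 7) == 16  # 16 % 5 == 1 and 16 % 7 == 2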
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
lowercase = logging.get_logger(__name__)
@dataclass
class UpperCAmelCase ( __a):
'''simple docstring'''
__magic_name__ : List[Any] = [
"no_inference",
"no_cuda",
"no_tpu",
"no_speed",
"no_memory",
"no_env_print",
"no_multi_process",
]
def __init__( self , **lowerCAmelCase_) -> List[Any]:
"""simple docstring"""
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
a_ =deprecated_arg[3:]
setattr(self , lowerCAmelCase_ , not kwargs.pop(lowerCAmelCase_))
logger.warning(
f"""{deprecated_arg} is depreciated. Please use --no_{positive_arg} or"""
f""" {positive_arg}={kwargs[positive_arg]}""")
a_ =kwargs.pop("torchscript" , self.torchscript)
a_ =kwargs.pop("torch_xla_tpu_print_metrics" , self.torch_xla_tpu_print_metrics)
a_ =kwargs.pop("fp16_opt_level" , self.fpaa_opt_level)
super().__init__(**lowerCAmelCase_)
__magic_name__ : bool = field(default=__a , metadata={"help": "Trace the models using torchscript"})
__magic_name__ : bool = field(default=__a , metadata={"help": "Print Xla/PyTorch tpu metrics"})
__magic_name__ : str = field(
default="O1" , metadata={
"help": (
"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
"See details at https://nvidia.github.io/apex/amp.html"
)
} , )
@cached_property
def lowercase_ ( self) -> Tuple["torch.device", int]:
"""simple docstring"""
requires_backends(self , ["torch"])
logger.info("PyTorch: setting up devices")
if not self.cuda:
a_ =torch.device("cpu")
a_ =0
elif is_torch_tpu_available():
a_ =xm.xla_device()
a_ =0
else:
a_ =torch.device("cuda" if torch.cuda.is_available() else "cpu")
a_ =torch.cuda.device_count()
return device, n_gpu
@property
def lowercase_ ( self) -> List[str]:
"""simple docstring"""
return is_torch_tpu_available() and self.tpu
@property
def lowercase_ ( self) -> int:
"""simple docstring"""
requires_backends(self , ["torch"])
# TODO(PVP): currently only single GPU is supported
return torch.cuda.current_device()
@property
def lowercase_ ( self) -> "torch.device":
"""simple docstring"""
requires_backends(self , ["torch"])
return self._setup_devices[0]
@property
def lowercase_ ( self) -> str:
"""simple docstring"""
requires_backends(self , ["torch"])
return self._setup_devices[1]
@property
def lowercase_ ( self) -> Tuple:
"""simple docstring"""
return self.n_gpu > 0
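# The @cached_property device setup above reduces to this selection ladder
# (a sketch that skips the TPU branch, which needs torch_xla installed):
import torch


def pick_device(use_cuda: bool = True) -> "tuple[torch.device, int]":
    if not use_cuda:
        return torch.device("cpu"), 0
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    return device, torch.cuda.device_count()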
'''simple docstring'''
from typing import Any
import numpy as np
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
return np.array_equal(lowercase__ , matrix.conjugate().T )
def UpperCAmelCase_ ( lowercase__ , lowercase__ ):
'''simple docstring'''
a_ =v.conjugate().T
a_ =v_star.dot(lowercase__ )
assert isinstance(lowercase__ , np.ndarray )
return (v_star_dot.dot(lowercase__ )) / (v_star.dot(lowercase__ ))
def UpperCAmelCase_ ( ):
'''simple docstring'''
a_ =np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] )
a_ =np.array([[1], [2], [3]] )
assert is_hermitian(lowercase__ ), F"""{a} is not hermitian."""
print(rayleigh_quotient(lowercase__ , lowercase__ ) )
a_ =np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
assert is_hermitian(lowercase__ ), F"""{a} is not hermitian."""
assert rayleigh_quotient(lowercase__ , lowercase__ ) == float(3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
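# Sanity check of the property the test above relies on: for a Hermitian
# matrix A, the Rayleigh quotient v*Av / v*v always lies between the smallest
# and largest eigenvalue of A (a sketch; numpy is the only dependency).
import numpy as np

a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
v = np.array([[1], [2], [3]])
rq = (v.T @ a @ v).item() / (v.T @ v).item()
lo, hi = np.linalg.eigvalsh(a)[[0, -1]]  # eigenvalues in ascending order
assert lo <= rq <= hi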
'''simple docstring'''
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
lowercase = [
# (stable-diffusion, HF Diffusers)
('''time_embed.0.weight''', '''time_embedding.linear_1.weight'''),
('''time_embed.0.bias''', '''time_embedding.linear_1.bias'''),
('''time_embed.2.weight''', '''time_embedding.linear_2.weight'''),
('''time_embed.2.bias''', '''time_embedding.linear_2.bias'''),
('''input_blocks.0.0.weight''', '''conv_in.weight'''),
('''input_blocks.0.0.bias''', '''conv_in.bias'''),
('''out.0.weight''', '''conv_norm_out.weight'''),
('''out.0.bias''', '''conv_norm_out.bias'''),
('''out.2.weight''', '''conv_out.weight'''),
('''out.2.bias''', '''conv_out.bias'''),
]
lowercase = [
# (stable-diffusion, HF Diffusers)
('''in_layers.0''', '''norm1'''),
('''in_layers.2''', '''conv1'''),
('''out_layers.0''', '''norm2'''),
('''out_layers.3''', '''conv2'''),
('''emb_layers.1''', '''time_emb_proj'''),
('''skip_connection''', '''conv_shortcut'''),
]
lowercase = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
# loop over downblocks/upblocks
for j in range(2):
# loop over resnets/attentions for downblocks
lowercase = F"""down_blocks.{i}.resnets.{j}."""
lowercase = F"""input_blocks.{3*i + j + 1}.0."""
unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))
if i < 3:
# no attention layers in down_blocks.3
lowercase = F"""down_blocks.{i}.attentions.{j}."""
lowercase = F"""input_blocks.{3*i + j + 1}.1."""
unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))
for j in range(3):
# loop over resnets/attentions for upblocks
lowercase = F"""up_blocks.{i}.resnets.{j}."""
lowercase = F"""output_blocks.{3*i + j}.0."""
unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))
if i > 0:
# no attention layers in up_blocks.0
lowercase = F"""up_blocks.{i}.attentions.{j}."""
lowercase = F"""output_blocks.{3*i + j}.1."""
unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))
if i < 3:
# no downsample in down_blocks.3
lowercase = F"""down_blocks.{i}.downsamplers.0.conv."""
lowercase = F"""input_blocks.{3*(i+1)}.0.op."""
unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))
# no upsample in up_blocks.3
lowercase = F"""up_blocks.{i}.upsamplers.0."""
lowercase = F"""output_blocks.{3*i + 2}.{1 if i == 0 else 2}."""
unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))
lowercase = '''mid_block.attentions.0.'''
lowercase = '''middle_block.1.'''
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
for j in range(2):
lowercase = F"""mid_block.resnets.{j}."""
lowercase = F"""middle_block.{2*j}."""
unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
a_ ={k: k for k in unet_state_dict.keys()}
for sd_name, hf_name in unet_conversion_map:
a_ =sd_name
for k, v in mapping.items():
if "resnets" in k:
for sd_part, hf_part in unet_conversion_map_resnet:
a_ =v.replace(lowercase__ , lowercase__ )
a_ =v
for k, v in mapping.items():
for sd_part, hf_part in unet_conversion_map_layer:
a_ =v.replace(lowercase__ , lowercase__ )
a_ =v
a_ ={v: unet_state_dict[k] for k, v in mapping.items()}
return new_state_dict
# ================#
# VAE Conversion #
# ================#
lowercase = [
# (stable-diffusion, HF Diffusers)
('''nin_shortcut''', '''conv_shortcut'''),
('''norm_out''', '''conv_norm_out'''),
('''mid.attn_1.''', '''mid_block.attentions.0.'''),
]
for i in range(4):
# down_blocks have two resnets
for j in range(2):
lowercase = F"""encoder.down_blocks.{i}.resnets.{j}."""
lowercase = F"""encoder.down.{i}.block.{j}."""
vae_conversion_map.append((sd_down_prefix, hf_down_prefix))
if i < 3:
lowercase = F"""down_blocks.{i}.downsamplers.0."""
lowercase = F"""down.{i}.downsample."""
vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))
lowercase = F"""up_blocks.{i}.upsamplers.0."""
lowercase = F"""up.{3-i}.upsample."""
vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))
# up_blocks have three resnets
# also, up blocks in hf are numbered in reverse from sd
for j in range(3):
lowercase = F"""decoder.up_blocks.{i}.resnets.{j}."""
lowercase = F"""decoder.up.{3-i}.block.{j}."""
vae_conversion_map.append((sd_up_prefix, hf_up_prefix))
# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
lowercase = F"""mid_block.resnets.{i}."""
lowercase = F"""mid.block_{i+1}."""
vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
lowercase = [
# (stable-diffusion, HF Diffusers)
('''norm.''', '''group_norm.'''),
('''q.''', '''query.'''),
('''k.''', '''key.'''),
('''v.''', '''value.'''),
('''proj_out.''', '''proj_attn.'''),
]
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
return w.reshape(*w.shape , 1 , 1 )
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
a_ ={k: k for k in vae_state_dict.keys()}
for k, v in mapping.items():
for sd_part, hf_part in vae_conversion_map:
a_ =v.replace(lowercase__ , lowercase__ )
a_ =v
for k, v in mapping.items():
if "attentions" in k:
for sd_part, hf_part in vae_conversion_map_attn:
a_ =v.replace(lowercase__ , lowercase__ )
a_ =v
a_ ={v: vae_state_dict[k] for k, v in mapping.items()}
a_ =["q", "k", "v", "proj_out"]
for k, v in new_state_dict.items():
for weight_name in weights_to_convert:
if F"""mid.attn_1.{weight_name}.weight""" in k:
print(F"""Reshaping {k} for SD format""" )
a_ =reshape_weight_for_sd(lowercase__ )
return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
lowercase = [
# (stable-diffusion, HF Diffusers)
('''resblocks.''', '''text_model.encoder.layers.'''),
('''ln_1''', '''layer_norm1'''),
('''ln_2''', '''layer_norm2'''),
('''.c_fc.''', '''.fc1.'''),
('''.c_proj.''', '''.fc2.'''),
('''.attn''', '''.self_attn'''),
('''ln_final.''', '''transformer.text_model.final_layer_norm.'''),
('''token_embedding.weight''', '''transformer.text_model.embeddings.token_embedding.weight'''),
('''positional_embedding''', '''transformer.text_model.embeddings.position_embedding.weight'''),
]
lowercase = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
lowercase = re.compile('''|'''.join(protected.keys()))
# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
lowercase = {'''q''': 0, '''k''': 1, '''v''': 2}
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
a_ ={}
a_ ={}
a_ ={}
for k, v in text_enc_dict.items():
if (
k.endswith(".self_attn.q_proj.weight" )
or k.endswith(".self_attn.k_proj.weight" )
or k.endswith(".self_attn.v_proj.weight" )
):
a_ =k[: -len(".q_proj.weight" )]
a_ =k[-len("q_proj.weight" )]
if k_pre not in capture_qkv_weight:
a_ =[None, None, None]
a_ =v
continue
if (
k.endswith(".self_attn.q_proj.bias" )
or k.endswith(".self_attn.k_proj.bias" )
or k.endswith(".self_attn.v_proj.bias" )
):
a_ =k[: -len(".q_proj.bias" )]
a_ =k[-len("q_proj.bias" )]
if k_pre not in capture_qkv_bias:
a_ =[None, None, None]
a_ =v
continue
        a_ =textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] , lowercase__ )
a_ =v
for k_pre, tensors in capture_qkv_weight.items():
if None in tensors:
raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing" )
        a_ =textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] , lowercase__ )
a_ =torch.cat(lowercase__ )
for k_pre, tensors in capture_qkv_bias.items():
if None in tensors:
raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing" )
        a_ =textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] , lowercase__ )
a_ =torch.cat(lowercase__ )
return new_state_dict
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
return text_enc_dict
if __name__ == "__main__":
lowercase = argparse.ArgumentParser()
parser.add_argument('''--model_path''', default=None, type=str, required=True, help='''Path to the model to convert.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--half''', action='''store_true''', help='''Save weights in half precision.''')
parser.add_argument(
'''--use_safetensors''', action='''store_true''', help='''Save weights use safetensors, default is ckpt.'''
)
lowercase = parser.parse_args()
assert args.model_path is not None, "Must provide a model path!"
assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
# Path for safetensors
lowercase = osp.join(args.model_path, '''unet''', '''diffusion_pytorch_model.safetensors''')
lowercase = osp.join(args.model_path, '''vae''', '''diffusion_pytorch_model.safetensors''')
lowercase = osp.join(args.model_path, '''text_encoder''', '''model.safetensors''')
    # Load each model from safetensors if it exists, otherwise from the PyTorch .bin checkpoint
if osp.exists(unet_path):
lowercase = load_file(unet_path, device='''cpu''')
else:
lowercase = osp.join(args.model_path, '''unet''', '''diffusion_pytorch_model.bin''')
lowercase = torch.load(unet_path, map_location='''cpu''')
if osp.exists(vae_path):
lowercase = load_file(vae_path, device='''cpu''')
else:
lowercase = osp.join(args.model_path, '''vae''', '''diffusion_pytorch_model.bin''')
lowercase = torch.load(vae_path, map_location='''cpu''')
if osp.exists(text_enc_path):
lowercase = load_file(text_enc_path, device='''cpu''')
else:
lowercase = osp.join(args.model_path, '''text_encoder''', '''pytorch_model.bin''')
lowercase = torch.load(text_enc_path, map_location='''cpu''')
# Convert the UNet model
lowercase = convert_unet_state_dict(unet_state_dict)
lowercase = {'''model.diffusion_model.''' + k: v for k, v in unet_state_dict.items()}
# Convert the VAE model
lowercase = convert_vae_state_dict(vae_state_dict)
lowercase = {'''first_stage_model.''' + k: v for k, v in vae_state_dict.items()}
# Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
lowercase = '''text_model.encoder.layers.22.layer_norm2.bias''' in text_enc_dict
if is_vaa_model:
# Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
lowercase = {'''transformer.''' + k: v for k, v in text_enc_dict.items()}
lowercase = convert_text_enc_state_dict_vaa(text_enc_dict)
lowercase = {'''cond_stage_model.model.''' + k: v for k, v in text_enc_dict.items()}
else:
lowercase = convert_text_enc_state_dict(text_enc_dict)
lowercase = {'''cond_stage_model.transformer.''' + k: v for k, v in text_enc_dict.items()}
# Put together new checkpoint
lowercase = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
if args.half:
lowercase = {k: v.half() for k, v in state_dict.items()}
if args.use_safetensors:
save_file(state_dict, args.checkpoint_path)
else:
lowercase = {'''state_dict''': state_dict}
torch.save(state_dict, args.checkpoint_path)
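# The whole conversion above is repeated substring rewriting over state-dict
# keys. A toy illustration of the pattern with one made-up key:
mapping_demo = {k: k for k in ["down_blocks.0.resnets.0.norm1.weight"]}
for hf_part, sd_part in [("down_blocks.0.resnets.0.", "input_blocks.1.0."), ("norm1", "in_layers.0")]:
    mapping_demo = {k: v.replace(hf_part, sd_part) for k, v in mapping_demo.items()}
print(mapping_demo)  # maps the HF Diffusers key to 'input_blocks.1.0.in_layers.0.weight'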
'''simple docstring'''
from __future__ import annotations
lowercase = []
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
for i in range(len(lowercase__ ) ):
if board[row][i] == 1:
return False
for i in range(len(lowercase__ ) ):
if board[i][column] == 1:
return False
for i, j in zip(range(lowercase__ , -1 , -1 ) , range(lowercase__ , -1 , -1 ) ):
if board[i][j] == 1:
return False
for i, j in zip(range(lowercase__ , -1 , -1 ) , range(lowercase__ , len(lowercase__ ) ) ):
if board[i][j] == 1:
return False
return True
def UpperCAmelCase_ ( lowercase__ , lowercase__ ):
'''simple docstring'''
if row >= len(lowercase__ ):
solution.append(lowercase__ )
printboard(lowercase__ )
print()
return True
for i in range(len(lowercase__ ) ):
if is_safe(lowercase__ , lowercase__ , lowercase__ ):
a_ =1
solve(lowercase__ , row + 1 )
a_ =0
return False
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
for i in range(len(lowercase__ ) ):
for j in range(len(lowercase__ ) ):
if board[i][j] == 1:
print("Q" , end=" " )
else:
print("." , end=" " )
print()
# n=int(input("The no. of queens"))
lowercase = 8
lowercase = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('''The total no. of solutions are :''', len(solution))
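# A compact cross-check for the backtracking above: n = 8 has exactly 92
# solutions. This sketch (`count_n_queens` is a hypothetical helper) tracks
# attacked columns and diagonals in sets instead of rescanning the board.
def count_n_queens(n: int, row: int = 0, cols=frozenset(), diag1=frozenset(), diag2=frozenset()) -> int:
    if row == n:
        return 1
    total = 0
    for col in range(n):
        if col in cols or (row - col) in diag1 or (row + col) in diag2:
            continue  # attacked by an earlier queen
        total += count_n_queens(n, row + 1, cols | {col}, diag1 | {row - col}, diag2 | {row + col})
    return total


assert count_n_queens(8) == 92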
'''simple docstring'''
from importlib import import_module
from .logging import get_logger
lowercase = get_logger(__name__)
class UpperCAmelCase :
'''simple docstring'''
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=None) -> Tuple:
"""simple docstring"""
a_ =attrs or []
if module is not None:
for key in module.__dict__:
if key in attrs or not key.startswith("__"):
setattr(self , lowerCAmelCase_ , getattr(lowerCAmelCase_ , lowerCAmelCase_))
a_ =module._original_module if isinstance(lowerCAmelCase_ , _PatchedModuleObj) else module
class UpperCAmelCase :
'''simple docstring'''
__magic_name__ : List[str] = []
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None) -> Tuple:
"""simple docstring"""
a_ =obj
a_ =target
a_ =new
a_ =target.split(".")[0]
a_ ={}
a_ =attrs or []
def __enter__( self) -> str:
"""simple docstring"""
*a_ , a_ =self.target.split(".")
# Patch modules:
# it's used to patch attributes of submodules like "os.path.join";
# in this case we need to patch "os" and "os.path"
for i in range(len(lowerCAmelCase_)):
try:
a_ =import_module(".".join(submodules[: i + 1]))
except ModuleNotFoundError:
continue
# We iterate over all the globals in self.obj in case we find "os" or "os.path"
for attr in self.obj.__dir__():
a_ =getattr(self.obj , lowerCAmelCase_)
# We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
# This allows to patch renamed modules like "from os import path as ospath".
if obj_attr is submodule or (
(isinstance(lowerCAmelCase_ , _PatchedModuleObj) and obj_attr._original_module is submodule)
):
a_ =obj_attr
# patch at top level
setattr(self.obj , lowerCAmelCase_ , _PatchedModuleObj(lowerCAmelCase_ , attrs=self.attrs))
a_ =getattr(self.obj , lowerCAmelCase_)
# construct lower levels patches
for key in submodules[i + 1 :]:
setattr(lowerCAmelCase_ , lowerCAmelCase_ , _PatchedModuleObj(getattr(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) , attrs=self.attrs))
a_ =getattr(lowerCAmelCase_ , lowerCAmelCase_)
# finally set the target attribute
setattr(lowerCAmelCase_ , lowerCAmelCase_ , self.new)
# Patch attribute itself:
# it's used for builtins like "open",
# and also to patch "os.path.join" we may also need to patch "join"
# itself if it was imported as "from os.path import join".
if submodules: # if it's an attribute of a submodule like "os.path.join"
try:
a_ =getattr(import_module(".".join(lowerCAmelCase_)) , lowerCAmelCase_)
except (AttributeError, ModuleNotFoundError):
return
# We iterate over all the globals in self.obj in case we find "os.path.join"
for attr in self.obj.__dir__():
# We don't check for the name of the global, but rather if its value *is* "os.path.join".
# This allows to patch renamed attributes like "from os.path import join as pjoin".
if getattr(self.obj , lowerCAmelCase_) is attr_value:
a_ =getattr(self.obj , lowerCAmelCase_)
setattr(self.obj , lowerCAmelCase_ , self.new)
        elif target_attr in globals()["__builtins__"]: # if it's a builtin like "open"
a_ =globals()["__builtins__"][target_attr]
setattr(self.obj , lowerCAmelCase_ , self.new)
else:
raise RuntimeError(f"""Tried to patch attribute {target_attr} instead of a submodule.""")
def __exit__( self , *lowerCAmelCase_) -> Union[str, Any]:
"""simple docstring"""
for attr in list(self.original):
setattr(self.obj , lowerCAmelCase_ , self.original.pop(lowerCAmelCase_))
def lowercase_ ( self) -> Optional[int]:
"""simple docstring"""
self.__enter__()
self._active_patches.append(self)
def lowercase_ ( self) -> List[str]:
"""simple docstring"""
try:
self._active_patches.remove(self)
except ValueError:
# If the patch hasn't been started this will fail
return None
return self.__exit__()
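# Usage sketch for the patcher above (the class names are obfuscated here; in
# the datasets library this pattern appears as `patch_submodule(module,
# "os.path.join", new)`): as a context manager it swaps the attribute in and
# restores it on __exit__. The same effect with the standard library, for
# comparison:
import os.path
from unittest import mock

with mock.patch("os.path.join", lambda *parts: "/".join(parts)):
    assert os.path.join("a", "b") == "a/b"  # patched join in effect here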
'''simple docstring'''
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ , lowercase__=5 ):
'''simple docstring'''
assert masked_input.count("<mask>" ) == 1
a_ =torch.tensor(tokenizer.encode(lowercase__ , add_special_tokens=lowercase__ ) ).unsqueeze(0 ) # Batch size 1
a_ =model(lowercase__ )[0] # The last hidden-state is the first element of the output tuple
a_ =(input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
a_ =logits[0, masked_index, :]
a_ =logits.softmax(dim=0 )
a_ , a_ =prob.topk(k=lowercase__ , dim=0 )
a_ =" ".join(
[tokenizer.convert_ids_to_tokens(indices[i].item() ) for i in range(len(lowercase__ ) )] )
a_ =tokenizer.mask_token
a_ =[]
for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" " ) ):
a_ =predicted_token_bpe.replace("\u2581" , " " )
if " {0}".format(lowercase__ ) in masked_input:
topk_filled_outputs.append(
(
masked_input.replace(" {0}".format(lowercase__ ) , lowercase__ ),
values[index].item(),
predicted_token,
) )
else:
topk_filled_outputs.append(
(
masked_input.replace(lowercase__ , lowercase__ ),
values[index].item(),
predicted_token,
) )
return topk_filled_outputs
lowercase = CamembertTokenizer.from_pretrained('''camembert-base''')
lowercase = CamembertForMaskedLM.from_pretrained('''camembert-base''')
model.eval()
lowercase = '''Le camembert est <mask> :)'''
print(fill_mask(masked_input, model, tokenizer, topk=3))
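# The script above hand-rolls top-k mask filling; the transformers pipeline
# API gives equivalent behaviour in a few lines (a sketch; this downloads the
# camembert-base checkpoint on first use):
from transformers import pipeline

fill = pipeline("fill-mask", model="camembert-base", top_k=3)
for prediction in fill("Le camembert est <mask> :)"):
    print(prediction["sequence"], prediction["score"])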
'''simple docstring'''
import warnings
from ..trainer import Trainer
from ..utils import logging
lowercase = logging.get_logger(__name__)
class UpperCAmelCase ( __a):
'''simple docstring'''
def __init__( self , lowerCAmelCase_=None , **lowerCAmelCase_) -> List[str]:
"""simple docstring"""
warnings.warn(
"`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
"instead." , lowerCAmelCase_ , )
super().__init__(args=lowerCAmelCase_ , **lowerCAmelCase_)
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowercase = {
'''configuration_rag''': ['''RagConfig'''],
'''retrieval_rag''': ['''RagRetriever'''],
'''tokenization_rag''': ['''RagTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
'''RagModel''',
'''RagPreTrainedModel''',
'''RagSequenceForGeneration''',
'''RagTokenForGeneration''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
'''TFRagModel''',
'''TFRagPreTrainedModel''',
'''TFRagSequenceForGeneration''',
'''TFRagTokenForGeneration''',
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
lowercase = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = ['''MLukeTokenizer''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
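# The _LazyModule machinery above defers submodule imports until attribute
# access. The core idea can be sketched with PEP 562 module-level __getattr__
# (a minimal stand-in for illustration, not the actual transformers
# implementation):
import importlib

_LAZY = {"MLukeTokenizer": ".tokenization_mluke"}  # attribute -> submodule


def __getattr__(name):
    if name in _LAZY:
        submodule = importlib.import_module(_LAZY[name], __name__)
        return getattr(submodule, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")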
'''simple docstring'''
import os
# Precomputes a list of the 100 first triangular numbers
lowercase = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def UpperCAmelCase_ ( ):
'''simple docstring'''
    a_ =os.path.dirname(os.path.realpath(__file__ ) )
a_ =os.path.join(lowercase__ , "words.txt" )
a_ =""
with open(lowercase__ ) as f:
a_ =f.readline()
a_ =[word.strip("\"" ) for word in words.strip("\r\n" ).split("," )]
a_ =[
word
        for word in [sum(ord(x ) - 6_4 for x in word ) for word in words]
if word in TRIANGULAR_NUMBERS
]
return len(lowercase__ )
if __name__ == "__main__":
print(solution())
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
lowercase = logging.get_logger(__name__)
lowercase = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
lowercase = {
'''vocab_file''': {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json'''
),
},
}
lowercase = {
'''yjernite/retribert-base-uncased''': 512,
}
lowercase = {
'''yjernite/retribert-base-uncased''': {'''do_lower_case''': True},
}
class UpperCAmelCase ( __a):
'''simple docstring'''
__magic_name__ : Optional[Any] = VOCAB_FILES_NAMES
__magic_name__ : int = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ : Union[str, Any] = PRETRAINED_INIT_CONFIGURATION
__magic_name__ : Dict = RetriBertTokenizer
__magic_name__ : str = ["input_ids", "attention_mask"]
def __init__( self , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=True , lowerCAmelCase_="[UNK]" , lowerCAmelCase_="[SEP]" , lowerCAmelCase_="[PAD]" , lowerCAmelCase_="[CLS]" , lowerCAmelCase_="[MASK]" , lowerCAmelCase_=True , lowerCAmelCase_=None , **lowerCAmelCase_ , ) -> List[Any]:
"""simple docstring"""
super().__init__(
lowerCAmelCase_ , tokenizer_file=lowerCAmelCase_ , do_lower_case=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , tokenize_chinese_chars=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ , **lowerCAmelCase_ , )
a_ =json.loads(self.backend_tokenizer.normalizer.__getstate__())
if (
normalizer_state.get("lowercase" , lowerCAmelCase_) != do_lower_case
or normalizer_state.get("strip_accents" , lowerCAmelCase_) != strip_accents
or normalizer_state.get("handle_chinese_chars" , lowerCAmelCase_) != tokenize_chinese_chars
):
a_ =getattr(lowerCAmelCase_ , normalizer_state.pop("type"))
a_ =do_lower_case
a_ =strip_accents
a_ =tokenize_chinese_chars
a_ =normalizer_class(**lowerCAmelCase_)
a_ =do_lower_case
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_=None) -> List[str]:
"""simple docstring"""
a_ =[self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ = None) -> List[int]:
"""simple docstring"""
a_ =[self.sep_token_id]
a_ =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ = None) -> Tuple[str]:
"""simple docstring"""
a_ =self._tokenizer.model.save(lowerCAmelCase_ , name=lowerCAmelCase_)
return tuple(lowerCAmelCase_)
'''simple docstring'''
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
lowercase = logging.get_logger(__name__)
set_seed(770)
lowercase = {
'''c_attn''': '''att_proj''',
'''c_proj''': '''out_proj''',
'''c_fc''': '''in_proj''',
'''transformer.''': '''''',
'''h.''': '''layers.''',
'''ln_1''': '''layernorm_1''',
'''ln_2''': '''layernorm_2''',
'''ln_f''': '''layernorm_final''',
'''wpe''': '''position_embeds_layer''',
'''wte''': '''input_embeds_layer''',
}
lowercase = {
'''text_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''text.pt''',
},
'''coarse_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''coarse.pt''',
},
'''fine_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''fine.pt''',
},
'''text''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''text_2.pt''',
},
'''coarse''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''coarse_2.pt''',
},
'''fine''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''fine_2.pt''',
},
}
lowercase = os.path.dirname(os.path.abspath(__file__))
lowercase = os.path.join(os.path.expanduser('''~'''), '''.cache''')
lowercase = os.path.join(os.getenv('''XDG_CACHE_HOME''', default_cache_dir), '''suno''', '''bark_v0''')
def UpperCAmelCase_ ( lowercase__ , lowercase__=False ):
'''simple docstring'''
a_ =model_type
if use_small:
key += "_small"
return os.path.join(lowercase__ , REMOTE_MODEL_PATHS[key]["file_name"] )
def UpperCAmelCase_ ( lowercase__ , lowercase__ ):
'''simple docstring'''
os.makedirs(lowercase__ , exist_ok=lowercase__ )
hf_hub_download(repo_id=lowercase__ , filename=lowercase__ , local_dir=lowercase__ )
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__=False , lowercase__="text" ):
'''simple docstring'''
if model_type == "text":
a_ =BarkSemanticModel
a_ =BarkSemanticConfig
a_ =BarkSemanticGenerationConfig
elif model_type == "coarse":
a_ =BarkCoarseModel
a_ =BarkCoarseConfig
a_ =BarkCoarseGenerationConfig
elif model_type == "fine":
a_ =BarkFineModel
a_ =BarkFineConfig
a_ =BarkFineGenerationConfig
else:
raise NotImplementedError()
a_ =F"""{model_type}_small""" if use_small else model_type
a_ =REMOTE_MODEL_PATHS[model_key]
if not os.path.exists(lowercase__ ):
logger.info(F"""{model_type} model not found, downloading into `{CACHE_DIR}`.""" )
_download(model_info["repo_id"] , model_info["file_name"] )
a_ =torch.load(lowercase__ , map_location=lowercase__ )
# this is a hack
a_ =checkpoint["model_args"]
if "input_vocab_size" not in model_args:
a_ =model_args["vocab_size"]
a_ =model_args["vocab_size"]
del model_args["vocab_size"]
# convert Bark model arguments to HF Bark model arguments
a_ =model_args.pop("n_head" )
a_ =model_args.pop("n_embd" )
a_ =model_args.pop("n_layer" )
a_ =ConfigClass(**checkpoint["model_args"] )
a_ =ModelClass(config=lowercase__ )
a_ =GenerationConfigClass()
a_ =model_generation_config
a_ =checkpoint["model"]
# fixup checkpoint
a_ ="_orig_mod."
for k, v in list(state_dict.items() ):
if k.startswith(lowercase__ ):
# replace part of the key with corresponding layer name in HF implementation
a_ =k[len(lowercase__ ) :]
for old_layer_name in new_layer_name_dict:
a_ =new_k.replace(lowercase__ , new_layer_name_dict[old_layer_name] )
a_ =state_dict.pop(lowercase__ )
a_ =set(state_dict.keys() ) - set(model.state_dict().keys() )
a_ ={k for k in extra_keys if not k.endswith(".attn.bias" )}
a_ =set(model.state_dict().keys() ) - set(state_dict.keys() )
a_ ={k for k in missing_keys if not k.endswith(".attn.bias" )}
if len(lowercase__ ) != 0:
raise ValueError(F"""extra keys found: {extra_keys}""" )
if len(lowercase__ ) != 0:
raise ValueError(F"""missing keys: {missing_keys}""" )
model.load_state_dict(lowercase__ , strict=lowercase__ )
a_ =model.num_parameters(exclude_embeddings=lowercase__ )
a_ =checkpoint["best_val_loss"].item()
logger.info(F"""model loaded: {round(n_params/1E6 , 1 )}M params, {round(lowercase__ , 3 )} loss""" )
model.eval()
model.to(lowercase__ )
del checkpoint, state_dict
return model
def UpperCAmelCase_ ( lowercase__ , lowercase__=False , lowercase__="text" ):
'''simple docstring'''
if model_type not in ("text", "coarse", "fine"):
raise NotImplementedError()
a_ ="cpu" # do conversion on cpu
a_ =_get_ckpt_path(lowercase__ , use_small=lowercase__ )
a_ =_load_model(lowercase__ , lowercase__ , model_type=lowercase__ , use_small=lowercase__ )
# load bark initial model
a_ =_bark_load_model(lowercase__ , "cpu" , model_type=lowercase__ , use_small=lowercase__ )
if model_type == "text":
a_ =bark_model["model"]
if model.num_parameters(exclude_embeddings=lowercase__ ) != bark_model.get_num_params():
raise ValueError("initial and new models don't have the same number of parameters" )
# check if same output as the bark model
a_ =5
a_ =1_0
if model_type in ["text", "coarse"]:
a_ =torch.randint(2_5_6 , (batch_size, sequence_length) , dtype=torch.int )
a_ =bark_model(lowercase__ )[0]
a_ =model(lowercase__ )
# take last logits
a_ =output_new_model_total.logits[:, [-1], :]
else:
a_ =3
a_ =8
a_ =torch.randint(2_5_6 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
a_ =model(lowercase__ , lowercase__ )
a_ =bark_model(lowercase__ , lowercase__ )
a_ =output_new_model_total.logits
    # any output difference should come from differences in the self-attention implementation design
if output_new_model.shape != output_old_model.shape:
raise ValueError("initial and new outputs don't have the same shape" )
if (output_new_model - output_old_model).abs().max().item() > 1E-3:
raise ValueError("initial and new outputs are not equal" )
Path(lowercase__ ).mkdir(exist_ok=lowercase__ )
model.save_pretrained(lowercase__ )
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ):
'''simple docstring'''
a_ =os.path.join(lowercase__ , lowercase__ )
a_ =BarkSemanticConfig.from_pretrained(os.path.join(lowercase__ , "config.json" ) )
a_ =BarkCoarseConfig.from_pretrained(os.path.join(lowercase__ , "config.json" ) )
a_ =BarkFineConfig.from_pretrained(os.path.join(lowercase__ , "config.json" ) )
a_ =EncodecConfig.from_pretrained("facebook/encodec_24khz" )
a_ =BarkSemanticModel.from_pretrained(lowercase__ )
a_ =BarkCoarseModel.from_pretrained(lowercase__ )
a_ =BarkFineModel.from_pretrained(lowercase__ )
a_ =EncodecModel.from_pretrained("facebook/encodec_24khz" )
a_ =BarkConfig.from_sub_model_configs(
lowercase__ , lowercase__ , lowercase__ , lowercase__ )
a_ =BarkGenerationConfig.from_sub_model_configs(
semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
a_ =BarkModel(lowercase__ )
a_ =semantic
a_ =coarseAcoustic
a_ =fineAcoustic
a_ =codec
a_ =bark_generation_config
Path(lowercase__ ).mkdir(exist_ok=lowercase__ )
bark.save_pretrained(lowercase__ , repo_id=lowercase__ , push_to_hub=lowercase__ )
if __name__ == "__main__":
lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''model_type''', type=str, help='''text, coarse or fine.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--is_small''', action='''store_true''', help='''convert the small version instead of the large.''')
lowercase = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
lowercase = logging.get_logger(__name__) # pylint: disable=invalid-name
class UpperCAmelCase ( __a):
'''simple docstring'''
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , ) -> List[Any]:
"""simple docstring"""
super().__init__()
self.register_modules(
vae=lowerCAmelCase_ , text_encoder=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ , unet=lowerCAmelCase_ , scheduler=lowerCAmelCase_ , safety_checker=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , )
def lowercase_ ( self , lowerCAmelCase_ = "auto") -> Dict:
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
a_ =self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowerCAmelCase_)
def lowercase_ ( self) -> Tuple:
"""simple docstring"""
self.enable_attention_slicing(lowerCAmelCase_)
@torch.no_grad()
def __call__( self , lowerCAmelCase_ , lowerCAmelCase_ = 5_1_2 , lowerCAmelCase_ = 5_1_2 , lowerCAmelCase_ = 5_0 , lowerCAmelCase_ = 7.5 , lowerCAmelCase_ = None , lowerCAmelCase_ = 1 , lowerCAmelCase_ = 0.0 , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = "pil" , lowerCAmelCase_ = True , lowerCAmelCase_ = None , lowerCAmelCase_ = 1 , lowerCAmelCase_ = None , **lowerCAmelCase_ , ) -> List[str]:
"""simple docstring"""
if isinstance(lowerCAmelCase_ , lowerCAmelCase_):
a_ =1
elif isinstance(lowerCAmelCase_ , lowerCAmelCase_):
a_ =len(lowerCAmelCase_)
else:
raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(lowerCAmelCase_)}""")
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""")
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(lowerCAmelCase_ , lowerCAmelCase_) or callback_steps <= 0)
):
raise ValueError(
f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
f""" {type(lowerCAmelCase_)}.""")
# get prompt text embeddings
a_ =self.tokenizer(
lowerCAmelCase_ , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , )
a_ =text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
a_ =self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f""" {self.tokenizer.model_max_length} tokens: {removed_text}""")
a_ =text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
a_ =self.text_encoder(text_input_ids.to(self.device))[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
a_ , a_ , a_ =text_embeddings.shape
a_ =text_embeddings.repeat(1 , lowerCAmelCase_ , 1)
a_ =text_embeddings.view(bs_embed * num_images_per_prompt , lowerCAmelCase_ , -1)
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
a_ =guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
a_ =42
if negative_prompt is None:
a_ =[""]
elif type(lowerCAmelCase_) is not type(lowerCAmelCase_):
raise TypeError(
f"""`negative_prompt` should be the same type to `prompt`, but got {type(lowerCAmelCase_)} !="""
f""" {type(lowerCAmelCase_)}.""")
elif isinstance(lowerCAmelCase_ , lowerCAmelCase_):
a_ =[negative_prompt]
elif batch_size != len(lowerCAmelCase_):
raise ValueError(
f"""`negative_prompt`: {negative_prompt} has batch size {len(lowerCAmelCase_)}, but `prompt`:"""
f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
" the batch size of `prompt`.")
else:
a_ =negative_prompt
a_ =text_input_ids.shape[-1]
a_ =self.tokenizer(
lowerCAmelCase_ , padding="max_length" , max_length=lowerCAmelCase_ , truncation=lowerCAmelCase_ , return_tensors="pt" , )
a_ =self.text_encoder(uncond_input.input_ids.to(self.device))[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
a_ =uncond_embeddings.shape[1]
a_ =uncond_embeddings.repeat(lowerCAmelCase_ , lowerCAmelCase_ , 1)
a_ =uncond_embeddings.view(batch_size * num_images_per_prompt , lowerCAmelCase_ , -1)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
a_ =torch.cat([uncond_embeddings, text_embeddings])
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
a_ =(batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
a_ =(batch_size * num_images_per_prompt, self.unet.config.in_channels, 6_4, 6_4)
a_ =text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
a_ =torch.randn(
lowerCAmelCase_ , generator=lowerCAmelCase_ , device="cpu" , dtype=lowerCAmelCase_).to(self.device)
a_ =torch.randn(lowerCAmelCase_ , generator=lowerCAmelCase_ , device="cpu" , dtype=lowerCAmelCase_).to(
self.device)
else:
a_ =torch.randn(
lowerCAmelCase_ , generator=lowerCAmelCase_ , device=self.device , dtype=lowerCAmelCase_)
a_ =torch.randn(lowerCAmelCase_ , generator=lowerCAmelCase_ , device=self.device , dtype=lowerCAmelCase_)
else:
if latents_reference.shape != latents_shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""")
a_ =latents_reference.to(self.device)
a_ =latents.to(self.device)
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
a_ =(latents_shape[3] - latents_shape_reference[3]) // 2
a_ =(latents_shape[2] - latents_shape_reference[2]) // 2
a_ =latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
a_ =latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
a_ =0 if dx < 0 else dx
a_ =0 if dy < 0 else dy
a_ =max(-dx , 0)
a_ =max(-dy , 0)
# import pdb
# pdb.set_trace()
a_ =latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(lowerCAmelCase_)
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
a_ =self.scheduler.timesteps.to(self.device)
# scale the initial noise by the standard deviation required by the scheduler
a_ =latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
a_ ="eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
a_ ={}
if accepts_eta:
a_ =eta
for i, t in enumerate(self.progress_bar(lowerCAmelCase_)):
# expand the latents if we are doing classifier free guidance
a_ =torch.cat([latents] * 2) if do_classifier_free_guidance else latents
a_ =self.scheduler.scale_model_input(lowerCAmelCase_ , lowerCAmelCase_)
# predict the noise residual
a_ =self.unet(lowerCAmelCase_ , lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_).sample
# perform guidance
if do_classifier_free_guidance:
a_ , a_ =noise_pred.chunk(2)
a_ =noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
a_ =self.scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
a_ =1 / 0.1_8_2_1_5 * latents
a_ =self.vae.decode(lowerCAmelCase_).sample
a_ =(image / 2 + 0.5).clamp(0 , 1)
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
a_ =image.cpu().permute(0 , 2 , 3 , 1).float().numpy()
if self.safety_checker is not None:
a_ =self.feature_extractor(self.numpy_to_pil(lowerCAmelCase_) , return_tensors="pt").to(
self.device)
a_ , a_ =self.safety_checker(
images=lowerCAmelCase_ , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype))
else:
a_ =None
if output_type == "pil":
a_ =self.numpy_to_pil(lowerCAmelCase_)
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=lowerCAmelCase_ , nsfw_content_detected=lowerCAmelCase_)
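# The guidance step in __call__ above implements classifier-free guidance:
#     eps = eps_uncond + w * (eps_text - eps_uncond)
# i.e. a linear extrapolation from the unconditional prediction toward the
# text-conditioned one. A tensor-level sketch with illustrative shapes:
import torch

noise_pred = torch.randn(2, 4, 8, 8)  # batched [uncond; text] predictions
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
guidance_scale = 7.5
guided = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
assert guided.shape == noise_pred_uncond.shape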
'''simple docstring'''
from __future__ import annotations
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
a_ =str(lowercase__ )
return len(lowercase__ ) == 9 and set(lowercase__ ) == set("123456789" )
def UpperCAmelCase_ ( ):
'''simple docstring'''
for base_num in range(9_9_9_9 , 4_9_9_9 , -1 ):
a_ =1_0_0_0_0_2 * base_num
if is_9_pandigital(lowercase__ ):
return candidate
for base_num in range(3_3_3 , 9_9 , -1 ):
a_ =1_0_0_2_0_0_3 * base_num
if is_9_pandigital(lowercase__ ):
return candidate
return None
if __name__ == "__main__":
print(F"""{solution() = }""")
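# Quick check of the pandigital predicate used above: 192384576 is the
# concatenated product of 192 with (1, 2, 3), and uses each digit 1-9 once.
def is_9_pandigital_demo(n: int) -> bool:
    digits = str(n)
    return len(digits) == 9 and set(digits) == set("123456789")


assert is_9_pandigital_demo(192384576)
assert not is_9_pandigital_demo(123456780)  # contains a zero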
'''simple docstring'''
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class UpperCAmelCase ( unittest.TestCase):
'''simple docstring'''
def lowercase_ ( self) -> Union[str, Any]:
"""simple docstring"""
debug_launcher(test_script.main)
def lowercase_ ( self) -> int:
"""simple docstring"""
debug_launcher(test_ops.main)
'''simple docstring'''
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UNetBlockTesterMixin:
    '''simple docstring'''

    @property
    def dummy_input(self):
        """simple docstring"""
        return self.get_dummy_input()

    @property
    def output_shape(self):
        """simple docstring"""
        if self.block_type == "down":
            return (4, 32, 16, 16)
        elif self.block_type == "mid":
            return (4, 32, 32, 32)
        elif self.block_type == "up":
            return (4, 32, 64, 64)
        raise ValueError(f"'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.")

    def get_dummy_input(
        self,
        include_temb=True,
        include_res_hidden_states_tuple=False,
        include_encoder_hidden_states=False,
        include_skip_sample=False,
    ):
        """simple docstring"""
        batch_size = 4
        num_channels = 32
        sizes = (32, 32)

        generator = torch.manual_seed(0)
        device = torch.device(torch_device)
        shape = (batch_size, num_channels) + sizes
        hidden_states = randn_tensor(shape, generator=generator, device=device)
        dummy_input = {"hidden_states": hidden_states}

        if include_temb:
            temb_channels = 128
            dummy_input["temb"] = randn_tensor((batch_size, temb_channels), generator=generator, device=device)

        if include_res_hidden_states_tuple:
            generator_1 = torch.manual_seed(1)
            dummy_input["res_hidden_states_tuple"] = (randn_tensor(shape, generator=generator_1, device=device),)

        if include_encoder_hidden_states:
            dummy_input["encoder_hidden_states"] = floats_tensor((batch_size, 32, 32)).to(torch_device)

        if include_skip_sample:
            dummy_input["skip_sample"] = randn_tensor(((batch_size, 3) + sizes), generator=generator, device=device)

        return dummy_input

    def prepare_init_args_and_inputs_for_common(self):
        """simple docstring"""
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
            "temb_channels": 128,
        }
        if self.block_type == "up":
            init_dict["prev_output_channel"] = 32

        if self.block_type == "mid":
            init_dict.pop("out_channels")

        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self, expected_slice):
        """simple docstring"""
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        unet_block = self.block_class(**init_dict)
        unet_block.to(torch_device)
        unet_block.eval()

        with torch.no_grad():
            output = unet_block(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        self.assertEqual(output.shape, self.output_shape)

        output_slice = output[0, -1, -3:, -3:]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        assert torch_all_close(output_slice.flatten(), expected_slice, atol=5e-3)

    @unittest.skipIf(torch_device == "mps", "Training is not supported in mps")
    def test_training(self):
        """simple docstring"""
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.block_class(**init_dict)
        model.to(torch_device)
        model.train()
        output = model(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        device = torch.device(torch_device)
        noise = randn_tensor(output.shape, device=device)
        loss = torch.nn.functional.mse_loss(output, noise)
        loss.backward()
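# Minimal usage sketch (illustrative, not part of the original file): a
# concrete test case mixes this class into unittest.TestCase and points
# `block_class` / `block_type` at a real diffusers block. The import path
# below is an assumption.
#
#   from diffusers.models.unet_2d_blocks import DownBlock2D
#
#   class DownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
#       block_class = DownBlock2D
#       block_type = "down"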
| 41
| 1
|
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class NezhaModelTester:
    '''simple docstring'''

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=128,
        max_relative_position=32,  # assumed Nezha-specific default restored from the signature's extra value
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        """simple docstring"""
        return NezhaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def prepare_config_and_inputs_for_decoder(self):
        """simple docstring"""
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """simple docstring"""
        model = NezhaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        """simple docstring"""
        config.add_cross_attention = True
        model = NezhaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """simple docstring"""
        model = NezhaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """simple docstring"""
        model = NezhaForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """simple docstring"""
        model = NezhaForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """simple docstring"""
        model = NezhaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = NezhaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = NezhaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """simple docstring"""
        config.num_choices = self.num_choices
        model = NezhaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class NezhaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''

    all_model_classes = (
        (
            NezhaModel,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NezhaModel,
            "fill-mask": NezhaForMaskedLM,
            "question-answering": NezhaForQuestionAnswering,
            "text-classification": NezhaForSequenceClassification,
            "token-classification": NezhaForTokenClassification,
            "zero-shot": NezhaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """simple docstring"""
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        """simple docstring"""
        self.model_tester = NezhaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NezhaConfig, hidden_size=37)

    def test_config(self):
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        """simple docstring"""
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_masked_lm(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NezhaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "bert.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "bert.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class NezhaModelIntegrationTest(unittest.TestCase):
    '''simple docstring'''

    @slow
    def test_inference_nezha_model(self):
        """simple docstring"""
        model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor([[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))

    @slow
    def test_inference_nezha_masked_lm(self):
        """simple docstring"""
        model = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 21128))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 41
|
'''simple docstring'''
from __future__ import annotations
def print_distance(distance: list[float], src):
    '''simple docstring'''
    print(f"Vertex\tShortest Distance from vertex {src}")
    for i, d in enumerate(distance):
        print(f"{i}\t\t{d}")


def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int):
    '''simple docstring'''
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            return True
    return False


def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    '''simple docstring'''
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0

    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w

    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception("Negative cycle found")

    return distance
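def _demo_bellman_ford() -> None:
    """Minimal usage sketch added for illustration (the graph below is an
    assumed example, not part of the original script): edges 0->1 (weight 2)
    and 1->2 (weight 3) give distances [0.0, 2.0, 5.0] from vertex 0."""
    demo_graph = [
        {"src": 0, "dst": 1, "weight": 2},
        {"src": 1, "dst": 2, "weight": 3},
    ]
    assert bellman_ford(demo_graph, 3, 2, 0) == [0.0, 2.0, 5.0]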
if __name__ == "__main__":
import doctest
doctest.testmod()
lowercase = int(input('''Enter number of vertices: ''').strip())
lowercase = int(input('''Enter number of edges: ''').strip())
lowercase = [{} for _ in range(E)]
for i in range(E):
print('''Edge ''', i + 1)
lowercase , lowercase , lowercase = (
int(x)
for x in input('''Enter source, destination, weight: ''').strip().split(''' ''')
)
lowercase = {'''src''': src, '''dst''': dest, '''weight''': weight}
lowercase = int(input('''\nEnter shortest path source:''').strip())
lowercase = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0)
| 41
| 1
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
lowercase = None
lowercase = logging.get_logger(__name__)
lowercase = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
lowercase = {
'''vocab_file''': {
'''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model''',
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'''
),
},
'''tokenizer_file''': {
'''google/bigbird-roberta-base''': (
'''https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json'''
),
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json'''
),
},
}
lowercase = {
'''google/bigbird-roberta-base''': 4_096,
'''google/bigbird-roberta-large''': 4_096,
'''google/bigbird-base-trivia-itc''': 4_096,
}
lowercase = '''▁'''
class BigBirdTokenizerFast(PreTrainedTokenizerFast):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        **kwargs,
    ):
        """simple docstring"""
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """simple docstring"""
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """simple docstring"""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
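# Hedged usage sketch (the checkpoint name comes from the URL map above; the
# exact token ids are not asserted since they depend on the vocabulary):
#
#   tokenizer = BigBirdTokenizerFast.from_pretrained("google/bigbird-roberta-base")
#   encoded = tokenizer("BigBird handles long sequences.")
#   print(encoded.input_ids)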
| 41
|
'''simple docstring'''
import torch

from diffusers import StableDiffusionPipeline

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

prompt = "A photo of sks dog in a bucket"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]

image.save("dog-bucket.png")
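# Note (an assumption about typical hardware, not part of the original
# snippet): torch.float16 halves GPU memory use; on CPU-only machines, drop
# the torch_dtype argument and the .to("cuda") call, e.g.:
#
#   pipe = StableDiffusionPipeline.from_pretrained(model_id)  # CPU fallback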
| 41
| 1
|
'''simple docstring'''
from __future__ import annotations
import pandas as pd
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    '''simple docstring'''
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    complete = 0
    increment_time = 0
    minm = 999_999_999
    short = 0
    check = False

    # Process until all processes are completed
    while complete != no_of_processes:
        for j in range(no_of_processes):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True

        if not check:
            increment_time += 1
            continue
        remaining_time[short] -= 1

        minm = remaining_time[short]
        if minm == 0:
            minm = 999_999_999

        if remaining_time[short] == 0:
            complete += 1
            check = False

            # Find finish time of current process
            finish_time = increment_time + 1

            # Calculate waiting time
            finar = finish_time - arrival_time[short]
            waiting_time[short] = finar - burst_time[short]

            if waiting_time[short] < 0:
                waiting_time[short] = 0

        # Increment time
        increment_time += 1
    return waiting_time
def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    '''simple docstring'''
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time


def calculate_average_times(waiting_time: list[int], turn_around_time: list[int], no_of_processes: int) -> None:
    '''simple docstring'''
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(f"Average waiting time = {total_waiting_time / no_of_processes:.5f}")
    print("Average turn around time =", total_turn_around_time / no_of_processes)
if __name__ == "__main__":
print('''Enter how many process you want to analyze''')
lowercase = int(input())
lowercase = [0] * no_of_processes
lowercase = [0] * no_of_processes
lowercase = list(range(1, no_of_processes + 1))
for i in range(no_of_processes):
print('''Enter the arrival time and burst time for process:--''' + str(i + 1))
lowercase , lowercase = map(int, input().split())
lowercase = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
lowercase = burst_time
lowercase = no_of_processes
lowercase = waiting_time
lowercase = calculate_turnaroundtime(bt, n, wt)
calculate_average_times(waiting_time, turn_around_time, no_of_processes)
lowercase = pd.DataFrame(
list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
columns=[
'''Process''',
'''BurstTime''',
'''ArrivalTime''',
'''WaitingTime''',
'''TurnAroundTime''',
],
)
# Printing the dataFrame
pd.set_option('''display.max_rows''', fcfs.shape[0] + 1)
print(fcfs)
| 41
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowercase = {
'''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = ['''VisionEncoderDecoderModel''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = ['''TFVisionEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = ['''FlaxVisionEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
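# Hedged usage sketch (illustrative; the checkpoint name is an assumption):
# _LazyModule defers the framework-specific imports declared above until an
# attribute is first accessed, so the import below is what finally pulls in
# the torch-backed modeling code.
#
#   from transformers import VisionEncoderDecoderModel
#   model = VisionEncoderDecoderModel.from_pretrained("nlpconnect/vit-gpt2-image-captioning")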
| 41
| 1
|
'''simple docstring'''
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
lowercase = '''\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
'''
lowercase = '''\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
'''
lowercase = '''
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for \'record\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'prediction_text\': the predicted answer text
- for \'multirc\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question-answer pair as specified by the dataset
- \'prediction\': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for \'record\': list of question-answers dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'answers\': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for \'record\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1\': F1 score
- for \'multirc\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1_m\': Per-question macro-F1 score
- \'f1_a\': Average F1 score over all answers
- for \'axb\':
- \'matthews_correlation\': Matthews correlation coefficient
- for \'cb\':
- \'accuracy\': Accuracy
- \'f1\': F1 score
- for all others:
- \'accuracy\': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')
>>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]
>>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')
>>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def simple_accuracy(preds, labels):
    '''simple docstring'''
    return float((preds == labels).mean())


def acc_and_fa(preds, labels, fa_avg="binary"):
    '''simple docstring'''
    acc = simple_accuracy(preds, labels)
    fa = float(fa_score(y_true=labels, y_pred=preds, average=fa_avg))
    return {
        "accuracy": acc,
        "f1": fa,
    }


def evaluate_multirc(ids_preds, labels):
    '''simple docstring'''
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f"{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]

    fas, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        fa = fa_score(y_true=question_labels, y_pred=question_preds, average="macro")
        fas.append(fa)
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    fa_m = float(sum(fas) / len(fas))
    em = sum(ems) / len(ems)
    fa_a = float(fa_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class SuperGlue(datasets.Metric):
    '''simple docstring'''

    def _info(self):
"""simple docstring"""
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
"You should supply a configuration name selected in "
"[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]")
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types()) , codebase_urls=[] , reference_urls=[] , format="numpy" if not self.config_name == "record" and not self.config_name == "multirc" else None , )
    def _get_feature_types(self):
"""simple docstring"""
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value("int64"),
"query": datasets.Value("int64"),
},
"prediction_text": datasets.Value("string"),
},
"references": {
"idx": {
"passage": datasets.Value("int64"),
"query": datasets.Value("int64"),
},
"answers": datasets.Sequence(datasets.Value("string")),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value("int64"),
"paragraph": datasets.Value("int64"),
"question": datasets.Value("int64"),
},
"prediction": datasets.Value("int64"),
},
"references": datasets.Value("int64"),
}
else:
return {
"predictions": datasets.Value("int64"),
"references": datasets.Value("int64"),
}
    def _compute(self, predictions, references):
        """simple docstring"""
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_fa(predictions, references, fa_avg="macro")
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                "[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]")
| 41
|
'''simple docstring'''
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowercase = logging.get_logger(__name__)
lowercase = {'''vocab_file''': '''vocab.txt''', '''emoji_file''': '''emoji.json'''}
lowercase = {
'''vocab_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt''',
},
'''emoji_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json''',
},
}
lowercase = {
'''abeja/gpt-neox-japanese-2.7b''': 2_048,
}
def load_vocab_and_emoji(vocab_file, emoji_file):
    '''simple docstring'''
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())

    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b)] = idx
        for wd in b:
            vocab[wd] = idx

    return vocab, raw_vocab, ids_to_tokens, emoji
class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        emoji_file,
        unk_token="<|endoftext|>",
        pad_token="<|endoftext|>",
        bos_token="<|startoftext|>",
        eos_token="<|endoftext|>",
        do_clean_text=False,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(
            unk_token=unk_token,
            pad_token=pad_token,
            bos_token=bos_token,
            eos_token=eos_token,
            do_clean_text=do_clean_text,
            **kwargs,
        )
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
                " model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f"Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"
                " pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji)
    @property
    def vocab_size(self):
        """simple docstring"""
        return len(self.raw_vocab)

    def get_vocab(self):
        """simple docstring"""
        return dict(self.raw_vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """simple docstring"""
        return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)

    def _convert_token_to_id(self, token):
        """simple docstring"""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """simple docstring"""
        return self.subword_tokenizer.convert_id_to_token(index)

    def convert_tokens_to_string(self, tokens):
        """simple docstring"""
        out_string = "".join(tokens).strip()
        return out_string

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """simple docstring"""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """simple docstring"""
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
            emoji_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"])
        else:
            vocab_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
            )
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!")
                    index = token_index
                writer.write(",".join(token) + "\n")
                index += 1
        with open(emoji_file, "w", encoding="utf-8") as writer:
            json.dump(self.emoji, writer)
        return vocab_file, emoji_file
class SubWordJapaneseTokenizer(object):
    '''simple docstring'''

    def __init__(self, vocab, ids_to_tokens, emoji):
        """simple docstring"""
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
        self.content_repatter1 = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
        self.content_repatter2 = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
        self.content_repatter3 = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
        self.content_repatter4 = re.compile(
            r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*")
        self.content_repatter5 = re.compile(
            r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*")
        self.content_repatter6 = re.compile(
            r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*")
        keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
        blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
        self.content_trans1 = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})

    def __len__(self):
        """simple docstring"""
        return len(self.ids_to_tokens)

    def clean_text(self, content):
        """simple docstring"""
        content = self.content_repatter1.sub("<URL>", content)
        content = self.content_repatter2.sub("<EMAIL>", content)
        content = self.content_repatter3.sub("<TEL>", content)
        content = self.content_repatter4.sub("<DATE>", content)
        content = self.content_repatter5.sub("<DATE>", content)
        content = self.content_repatter6.sub("<PRICE>", content)
        content = content.translate(self.content_trans1)
        while "<BLOCK><BLOCK>" in content:
            content = content.replace("<BLOCK><BLOCK>", "<BLOCK>")
        return content
    def tokenize(self, text, clean=False):
        """simple docstring"""
        text = text.replace(" ", "<SP>")
        text = text.replace(" ", "<SP>")
        text = text.replace("\r\n", "<BR>")
        text = text.replace("\n", "<BR>")
        text = text.replace("\r", "<BR>")
        text = text.replace("\t", "<TAB>")
        text = text.replace("—", "ー")
        text = text.replace("−", "ー")
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k, v)
        if clean:
            text = self.clean_text(text)

        def check_simbol(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 2:
                c = (int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0xc2a1 and c <= 0xc2bf)
                    or (c >= 0xc780 and c <= 0xc783)
                    or (c >= 0xcab9 and c <= 0xcbbf)
                    or (c >= 0xcc80 and c <= 0xcda2)
                ):
                    return True
            return False

        def checku2e(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 3:
                c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
                if c >= 0xe2_8080 and c <= 0xe2_b07f:
                    return True
            return False

        pos = 0
        result = []
        while pos < len(text):
            end = min(len(text), pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end, pos, -1):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(candidates) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates, key=lambda x: x[0])[0]
                result.append(wd)
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd):
                    result.append("<KIGOU>")
                elif checku2e(wd):
                    result.append("<U2000U2BFF>")
                else:
                    for i in wd.encode("utf-8"):
                        result.append("<|byte%d|>" % i)
                pos = end
        return result
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_="\n") -> List[Any]:
"""simple docstring"""
a_ =[]
a_ =[]
a_ =self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2]))
else:
if len(lowerCAmelCase_) > 0:
words.append(bytearray(lowerCAmelCase_).decode("utf-8" , errors="replace"))
a_ =[]
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji["emoji_inv"][word])
elif word == "<SP>":
words.append(" ")
elif word == "<BR>":
words.append(lowerCAmelCase_)
elif word == "<TAB>":
words.append("\t")
elif word == "<BLOCK>":
words.append("▀")
elif word == "<KIGOU>":
words.append("ǀ")
elif word == "<U2000U2BFF>":
words.append("‖")
else:
words.append(lowerCAmelCase_)
if len(lowerCAmelCase_) > 0:
words.append(bytearray(lowerCAmelCase_).decode("utf-8" , errors="replace"))
a_ ="".join(lowerCAmelCase_)
return text
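# Hedged usage sketch (checkpoint name taken from PRETRAINED_VOCAB_FILES_MAP
# above; the printed ids depend on the released vocabulary):
#
#   tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b")
#   print(tokenizer("こんにちは、世界。").input_ids)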
| 41
| 1
|
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class AltCLIPProcessor(ProcessorMixin):
    '''simple docstring'''

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        """simple docstring"""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """simple docstring"""
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """simple docstring"""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        """simple docstring"""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
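# Hedged usage sketch (the checkpoint name "BAAI/AltCLIP" is an assumption;
# any repo shipping a CLIP image processor plus an XLM-R tokenizer works):
#
#   from PIL import Image
#
#   processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")
#   inputs = processor(text=["a photo of a cat"], images=Image.open("cat.png"), return_tensors="pt")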
| 41
|
'''simple docstring'''
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowercase = logging.get_logger(__name__)
model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}

CONFIG_MAP = {
'''b0''': {
'''hidden_dim''': 1_280,
'''width_coef''': 1.0,
'''depth_coef''': 1.0,
'''image_size''': 224,
'''dropout_rate''': 0.2,
'''dw_padding''': [],
},
'''b1''': {
'''hidden_dim''': 1_280,
'''width_coef''': 1.0,
'''depth_coef''': 1.1,
'''image_size''': 240,
'''dropout_rate''': 0.2,
'''dw_padding''': [16],
},
'''b2''': {
'''hidden_dim''': 1_408,
'''width_coef''': 1.1,
'''depth_coef''': 1.2,
'''image_size''': 260,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 8, 16],
},
'''b3''': {
'''hidden_dim''': 1_536,
'''width_coef''': 1.2,
'''depth_coef''': 1.4,
'''image_size''': 300,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 18],
},
'''b4''': {
'''hidden_dim''': 1_792,
'''width_coef''': 1.4,
'''depth_coef''': 1.8,
'''image_size''': 380,
'''dropout_rate''': 0.4,
'''dw_padding''': [6],
},
'''b5''': {
'''hidden_dim''': 2_048,
'''width_coef''': 1.6,
'''depth_coef''': 2.2,
'''image_size''': 456,
'''dropout_rate''': 0.4,
'''dw_padding''': [13, 27],
},
'''b6''': {
'''hidden_dim''': 2_304,
'''width_coef''': 1.8,
'''depth_coef''': 2.6,
'''image_size''': 528,
'''dropout_rate''': 0.5,
'''dw_padding''': [31],
},
'''b7''': {
'''hidden_dim''': 2_560,
'''width_coef''': 2.0,
'''depth_coef''': 3.1,
'''image_size''': 600,
'''dropout_rate''': 0.5,
'''dw_padding''': [18],
},
}
def get_efficientnet_config(model_name):
    '''simple docstring'''
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config


def prepare_img():
    '''simple docstring'''
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


def convert_image_processor(model_name):
    '''simple docstring'''
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size},
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.47853944, 0.4732864, 0.47434163],
        do_center_crop=False,
    )
    return preprocessor
def rename_keys(original_param_names):
    '''simple docstring'''
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight") )
rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") )
rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") )
rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") )
rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") )
for b in block_names:
a_ =block_name_mapping[b]
rename_keys.append((F"""block{b}_expand_conv/kernel:0""", F"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") )
rename_keys.append((F"""block{b}_expand_bn/gamma:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") )
rename_keys.append((F"""block{b}_expand_bn/beta:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") )
rename_keys.append(
(F"""block{b}_dwconv/depthwise_kernel:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") )
rename_keys.append((F"""block{b}_bn/gamma:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") )
rename_keys.append((F"""block{b}_bn/beta:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") )
rename_keys.append(
(F"""block{b}_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") )
rename_keys.append(
(F"""block{b}_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") )
rename_keys.append((F"""block{b}_se_reduce/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") )
rename_keys.append((F"""block{b}_se_reduce/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") )
rename_keys.append((F"""block{b}_se_expand/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") )
rename_keys.append((F"""block{b}_se_expand/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") )
rename_keys.append(
(F"""block{b}_project_conv/kernel:0""", F"""encoder.blocks.{hf_b}.projection.project_conv.weight""") )
rename_keys.append((F"""block{b}_project_bn/gamma:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.weight""") )
rename_keys.append((F"""block{b}_project_bn/beta:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.bias""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") )
rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") )
rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") )
rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") )
rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") )
rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]

    # The Keras classification head layer is named "predictions".
    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
    '''simple docstring'''
    for key, value in tf_params.items():
        if "normalization" in key:
            continue

        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)

        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    '''simple docstring'''
    original_model = model_classes[model_name](
        include_top=True,
        weights="imagenet",
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1000,
        classifier_activation="softmax",
    )

    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''b0''',
type=str,
help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''hf_model''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''')
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
    args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
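# Illustrative invocation (paths are placeholders):
#   python convert_efficientnet_to_pytorch.py --model_name b0 \
#       --pytorch_dump_folder_path hf_model --save_model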
'''simple docstring'''
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class CpmAntTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    '''simple docstring'''
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        """simple docstring"""
        super().setUp()
        vocab_tokens = [
"<d>",
"</d>",
"<s>",
"</s>",
"</_>",
"<unk>",
"<pad>",
"</n>",
"我",
"是",
"C",
"P",
"M",
"A",
"n",
"t",
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
@tooslow
    def test_pre_tokenization(self):
        """simple docstring"""
        tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
        texts = "今天天气真好!"
        jieba_tokens = ["今天", "天气", "真", "好", "!"]
        tokens = tokenizer.tokenize(texts)
        self.assertListEqual(tokens, jieba_tokens)
        normalized_text = "今天天气真好!"
        input_tokens = [tokenizer.bos_token] + tokens
        input_ids = [6, 9802, 14962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_ids)
        reconstructed_text = tokenizer.decode(input_ids)
        self.assertEqual(reconstructed_text, normalized_text)
'''simple docstring'''
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
'''simple docstring'''
import os
# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def solution() -> int:
    '''simple docstring'''
    script_directory = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_directory, "words.txt")
    words = ""
    with open(words_file_path) as f:
        words = f.readline()
    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)
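# Example scoring (A=1 ... Z=26): "SKY" -> 19 + 11 + 25 = 55, which is the 10th
# triangular number, so "SKY" counts as a triangle word.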
if __name__ == "__main__":
print(solution())
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_timesformer''': ['''TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimesformerConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timesformer"] = [
'''TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimesformerModel''',
'''TimesformerForVideoClassification''',
'''TimesformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
def remove_duplicates(key: str) -> str:
    '''simple docstring'''
    key_no_dups = ""
    for ch in key:
        if ch == " " or (ch not in key_no_dups and ch.isalpha()):
            key_no_dups += ch
    return key_no_dups
def create_cipher_map(key: str) -> dict[str, str]:
    '''simple docstring'''
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(cipher_alphabet), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet
def encipher(message: str, cipher_map: dict[str, str]) -> str:
    '''simple docstring'''
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())
def decipher(message: str, cipher_map: dict[str, str]) -> str:
    '''simple docstring'''
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())
def main() -> None:
    '''simple docstring'''
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
'''simple docstring'''
from collections.abc import Generator
def fibonacci_generator() -> Generator[int, None, None]:
    '''simple docstring'''
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b
def solution(n: int = 1000) -> int:
    '''simple docstring'''
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1
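# Sanity check (known value): 144 is the 12th Fibonacci number and the first
# with three digits, so solution(3) returns 12.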
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
'''simple docstring'''
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray) -> bool:
    '''simple docstring'''
    return np.array_equal(matrix, matrix.conjugate().T)
def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    '''simple docstring'''
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))
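# The Rayleigh quotient R(a, v) = (v* a v) / (v* v); for a Hermitian matrix a
# it is always real and lies between the smallest and largest eigenvalues of a.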
def tests() -> None:
    '''simple docstring'''
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"""{a} is not hermitian."""
    print(rayleigh_quotient(a, v))
    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"""{a} is not hermitian."""
    assert rayleigh_quotient(a, v) == float(3)
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json",
}
class SwitchTransformersConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
    def __init__(self, vocab_size=32128, d_model=768, d_kv=64, d_ff=2048, expert_capacity=64, num_layers=12, num_sparse_encoder_layers=3, num_decoder_layers=12, num_sparse_decoder_layers=3, num_heads=12, num_experts=8, router_bias=False, router_jitter_noise=0.01, router_dtype="float32", router_ignore_padding_tokens=False, relative_attention_num_buckets=32, relative_attention_max_distance=128, dropout_rate=0.1, layer_norm_epsilon=1e-6, router_z_loss_coef=0.001, router_aux_loss_coef=0.001, initializer_factor=1.0, feed_forward_proj="relu", is_encoder_decoder=True, add_router_probs=False, use_cache=True, pad_token_id=0, eos_token_id=1, **kwargs, ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_sparse_encoder_layers = num_sparse_encoder_layers
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers
        # This tells us, each how many encoder layers, we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers
        # This tells us, each how many decoder layers, we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers
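        # Example: 12 encoder layers with num_sparse_encoder_layers=3 gives a
        # sparse step of 4, i.e. every 4th encoder layer is a sparse MoE layer.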
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"""`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}""")
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"
        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'")
        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"
        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, **kwargs, )
'''simple docstring'''
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class T5FilmDecoder(ModelMixin, ConfigMixin):
    '''simple docstring'''
    @register_to_config
    def __init__(self, input_dims: int = 128, targets_length: int = 256, max_decoder_noise_time: float = 2000.0, d_model: int = 768, num_layers: int = 12, num_heads: int = 12, d_kv: int = 64, d_ff: int = 2048, dropout_rate: float = 0.1, ):
        """simple docstring"""
        super().__init__()
        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model, d_model * 4, bias=False), nn.SiLU(), nn.Linear(d_model * 4, d_model * 4, bias=False), nn.SiLU(), )
        self.position_encoding = nn.Embedding(targets_length, d_model)
        self.position_encoding.weight.requires_grad = False
        self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False)
        self.dropout = nn.Dropout(p=dropout_rate)
        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate)
            self.decoders.append(lyr)
        self.decoder_norm = TaLayerNorm(d_model)
        self.post_dropout = nn.Dropout(p=dropout_rate)
        self.spec_out = nn.Linear(d_model, input_dims, bias=False)
    def encoder_decoder_mask(self, query_input, key_input):
        """simple docstring"""
        mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
        return mask.unsqueeze(-3)
    def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
        """simple docstring"""
        batch, _, _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)
        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time, embedding_dim=self.config.d_model, max_period=self.config.max_decoder_noise_time, ).to(dtype=self.dtype)
        conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)
        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
        seq_length = decoder_input_tokens.shape[1]
        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length, device=decoder_input_tokens.device), (batch, seq_length), )
        position_encodings = self.position_encoding(decoder_positions)
        inputs = self.continuous_inputs_projection(decoder_input_tokens)
        inputs += position_encodings
        y = self.dropout(inputs)
        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype)
        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask, y)) for x, y in encodings_and_masks]
        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)
        for lyr in self.decoders:
            y = lyr(
                y, conditioning_emb=conditioning_emb, encoder_hidden_states=encoded, encoder_attention_mask=encoder_decoder_mask, )[0]
        y = self.decoder_norm(y)
        y = self.post_dropout(y)
        spec_out = self.spec_out(y)
        return spec_out
class DecoderLayer(nn.Module):
    '''simple docstring'''
    def __init__(self, d_model, d_kv, num_heads, d_ff, dropout_rate, layer_norm_epsilon=1e-6):
        """simple docstring"""
        super().__init__()
        self.layer = nn.ModuleList()
        # cond self attention: layer 0
        self.layer.append(
            TaLayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate))
        # cross attention: layer 1
        self.layer.append(
            TaLayerCrossAttention(
                d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon, ))
        # Film Cond MLP + dropout: last layer
        self.layer.append(
            TaLayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon))
    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, ):
        """simple docstring"""
        hidden_states = self.layer[0](
            hidden_states, conditioning_emb=conditioning_emb, attention_mask=attention_mask, )
        if encoder_hidden_states is not None:
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1e10).to(
                encoder_hidden_states.dtype)
            hidden_states = self.layer[1](
                hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_extended_attention_mask, )
        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states, conditioning_emb)
        return (hidden_states,)
class TaLayerSelfAttentionCond(nn.Module):
    '''simple docstring'''
    def __init__(self, d_model, d_kv, num_heads, dropout_rate):
        """simple docstring"""
        super().__init__()
        self.layer_norm = TaLayerNorm(d_model)
        self.FiLMLayer = TaFiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.dropout = nn.Dropout(dropout_rate)
    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None, ):
        """simple docstring"""
        # pre_self_attention_layer_norm
        normed_hidden_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)
        # Self-attention block
        attention_output = self.attention(normed_hidden_states)
        hidden_states = hidden_states + self.dropout(attention_output)
        return hidden_states
class TaLayerCrossAttention(nn.Module):
    '''simple docstring'''
    def __init__(self, d_model, d_kv, num_heads, dropout_rate, layer_norm_epsilon):
        """simple docstring"""
        super().__init__()
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.layer_norm = TaLayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)
    def forward(self, hidden_states, key_value_states=None, attention_mask=None, ):
        """simple docstring"""
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.attention(
            normed_hidden_states, encoder_hidden_states=key_value_states, attention_mask=attention_mask.squeeze(1), )
        layer_output = hidden_states + self.dropout(attention_output)
        return layer_output
class TaLayerFFCond(nn.Module):
    '''simple docstring'''
    def __init__(self, d_model, d_ff, dropout_rate, layer_norm_epsilon):
        """simple docstring"""
        super().__init__()
        self.DenseReluDense = TaDenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
        self.film = TaFiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.layer_norm = TaLayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)
    def forward(self, hidden_states, conditioning_emb=None):
        """simple docstring"""
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb)
        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states
class TaDenseGatedActDense(nn.Module):
    '''simple docstring'''
    def __init__(self, d_model, d_ff, dropout_rate):
        """simple docstring"""
        super().__init__()
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()
    def forward(self, hidden_states):
        """simple docstring"""
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.wo(hidden_states)
        return hidden_states
class TaLayerNorm(nn.Module):
    '''simple docstring'''
    def __init__(self, hidden_size, eps=1e-6):
        """simple docstring"""
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps
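    # T5-style RMSNorm: rescale by the root mean square only, with no mean
    # subtraction and no bias term, unlike standard LayerNorm.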
    def forward(self, hidden_states):
        """simple docstring"""
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)
        return self.weight * hidden_states
class NewGELUActivation(nn.Module):
    '''simple docstring'''
    def forward(self, input: torch.Tensor) -> torch.Tensor:
        """simple docstring"""
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))
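# The forward above uses the tanh approximation of GELU:
# gelu(x) ~= 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x**3))).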
class TaFiLMLayer(nn.Module):
    '''simple docstring'''
    def __init__(self, in_features, out_features):
        """simple docstring"""
        super().__init__()
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)
    def forward(self, x, conditioning_emb):
        """simple docstring"""
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
'''simple docstring'''
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_results(output_dir):
    '''simple docstring'''
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"""can't find {path}""")
    return results
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class TorchXLAExamplesTests(TestCasePlus):
    '''simple docstring'''
    def test_run_glue_with_cli_args(self):
        """simple docstring"""
        import xla_spawn
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
./examples/pytorch/text-classification/run_glue.py
--num_cores=8
./examples/pytorch/text-classification/run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--overwrite_output_dir
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--do_train
--do_eval
--debug tpu_metrics_debug
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--max_steps=10
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
with patch.object(lowerCAmelCase_ , "argv" , lowerCAmelCase_):
a_ =time()
xla_spawn.main()
a_ =time()
a_ =get_results(lowerCAmelCase_)
self.assertGreaterEqual(result["eval_accuracy"] , 0.7_5)
# Assert that the script takes less than 500 seconds to make sure it doesn't hang.
self.assertLess(end - start , 5_0_0)
    def test_trainer_tpu(self):
        """simple docstring"""
        import xla_spawn
        testargs = "\n        ./tests/test_trainer_tpu.py\n        --num_cores=8\n        ./tests/test_trainer_tpu.py\n        ".split()
        with patch.object(sys, "argv", testargs):
            xla_spawn.main()
'''simple docstring'''
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class MobileBERTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    '''simple docstring'''
    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = "google/mobilebert-uncased"
    def setUp(self):
        """simple docstring"""
        super().setUp()
        vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        self.tokenizers_list = [
(tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped
for tokenizer_def in self.tokenizers_list
]
    def get_input_output_texts(self, tokenizer):
        """simple docstring"""
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        """simple docstring"""
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
def lowercase_ ( self) -> Dict:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
a_ =self.get_tokenizer()
a_ =self.get_rust_tokenizer()
a_ ="UNwant\u00E9d,running"
a_ =tokenizer.tokenize(lowerCAmelCase_)
a_ =rust_tokenizer.tokenize(lowerCAmelCase_)
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_)
a_ =tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_)
a_ =rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_)
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_)
a_ =self.get_rust_tokenizer()
a_ =tokenizer.encode(lowerCAmelCase_)
a_ =rust_tokenizer.encode(lowerCAmelCase_)
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_)
# With lower casing
a_ =self.get_tokenizer(do_lower_case=lowerCAmelCase_)
a_ =self.get_rust_tokenizer(do_lower_case=lowerCAmelCase_)
a_ ="UNwant\u00E9d,running"
a_ =tokenizer.tokenize(lowerCAmelCase_)
a_ =rust_tokenizer.tokenize(lowerCAmelCase_)
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_)
a_ =tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_)
a_ =rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_)
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_)
a_ =self.get_rust_tokenizer()
a_ =tokenizer.encode(lowerCAmelCase_)
a_ =rust_tokenizer.encode(lowerCAmelCase_)
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_)
def lowercase_ ( self) -> Any:
"""simple docstring"""
a_ =BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz") , ["ah", "\u535A", "\u63A8", "zz"])
def lowercase_ ( self) -> Optional[Any]:
"""simple docstring"""
a_ =BasicTokenizer(do_lower_case=lowerCAmelCase_)
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? ") , ["hello", "!", "how", "are", "you", "?"])
self.assertListEqual(tokenizer.tokenize("H\u00E9llo") , ["hello"])
def lowercase_ ( self) -> Any:
"""simple docstring"""
a_ =BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? ") , ["hällo", "!", "how", "are", "you", "?"])
self.assertListEqual(tokenizer.tokenize("H\u00E9llo") , ["h\u00E9llo"])
def lowercase_ ( self) -> str:
"""simple docstring"""
a_ =BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? ") , ["hallo", "!", "how", "are", "you", "?"])
self.assertListEqual(tokenizer.tokenize("H\u00E9llo") , ["hello"])
def lowercase_ ( self) -> List[str]:
"""simple docstring"""
a_ =BasicTokenizer(do_lower_case=lowerCAmelCase_)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? ") , ["hallo", "!", "how", "are", "you", "?"])
self.assertListEqual(tokenizer.tokenize("H\u00E9llo") , ["hello"])
def lowercase_ ( self) -> Any:
"""simple docstring"""
a_ =BasicTokenizer(do_lower_case=lowerCAmelCase_)
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? ") , ["HeLLo", "!", "how", "Are", "yoU", "?"])
def lowercase_ ( self) -> Tuple:
"""simple docstring"""
a_ =BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? ") , ["HäLLo", "!", "how", "Are", "yoU", "?"])
def lowercase_ ( self) -> List[str]:
"""simple docstring"""
a_ =BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? ") , ["HaLLo", "!", "how", "Are", "yoU", "?"])
def lowercase_ ( self) -> Dict:
"""simple docstring"""
a_ =BasicTokenizer(do_lower_case=lowerCAmelCase_ , never_split=["[UNK]"])
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]") , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"])
def lowercase_ ( self) -> Tuple:
"""simple docstring"""
a_ =["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
a_ ={}
for i, token in enumerate(lowerCAmelCase_):
a_ =i
a_ =WordpieceTokenizer(vocab=lowerCAmelCase_ , unk_token="[UNK]")
self.assertListEqual(tokenizer.tokenize("") , [])
self.assertListEqual(tokenizer.tokenize("unwanted running") , ["un", "##want", "##ed", "runn", "##ing"])
self.assertListEqual(tokenizer.tokenize("unwantedX running") , ["[UNK]", "runn", "##ing"])
def lowercase_ ( self) -> Dict:
"""simple docstring"""
self.assertTrue(_is_whitespace(" "))
self.assertTrue(_is_whitespace("\t"))
self.assertTrue(_is_whitespace("\r"))
self.assertTrue(_is_whitespace("\n"))
self.assertTrue(_is_whitespace("\u00A0"))
self.assertFalse(_is_whitespace("A"))
self.assertFalse(_is_whitespace("-"))
def lowercase_ ( self) -> List[Any]:
"""simple docstring"""
self.assertTrue(_is_control("\u0005"))
self.assertFalse(_is_control("A"))
self.assertFalse(_is_control(" "))
self.assertFalse(_is_control("\t"))
self.assertFalse(_is_control("\r"))
def lowercase_ ( self) -> Dict:
"""simple docstring"""
self.assertTrue(_is_punctuation("-"))
self.assertTrue(_is_punctuation("$"))
self.assertTrue(_is_punctuation("`"))
self.assertTrue(_is_punctuation("."))
self.assertFalse(_is_punctuation("A"))
self.assertFalse(_is_punctuation(" "))
def lowercase_ ( self) -> List[Any]:
"""simple docstring"""
a_ =self.get_tokenizer()
a_ =self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
@slow
def lowercase_ ( self) -> Optional[Any]:
"""simple docstring"""
a_ =self.tokenizer_class.from_pretrained("google/mobilebert-uncased")
a_ =tokenizer.encode("sequence builders" , add_special_tokens=lowerCAmelCase_)
a_ =tokenizer.encode("multi-sequence build" , add_special_tokens=lowerCAmelCase_)
a_ =tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_)
a_ =tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ , lowerCAmelCase_)
assert encoded_sentence == [1_0_1] + text + [1_0_2]
assert encoded_pair == [1_0_1] + text + [1_0_2] + text_a + [1_0_2]
def lowercase_ ( self) -> Dict:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
a_ =self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_)
a_ =f"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
a_ =tokenizer_r.encode_plus(
lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ , return_offsets_mapping=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , )
a_ =tokenizer_r.do_lower_case if hasattr(lowerCAmelCase_ , "do_lower_case") else False
a_ =(
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), "Allen"),
((2_1, 2_3), "##NL"),
((2_3, 2_4), "##P"),
((2_5, 3_3), "sentence"),
((3_3, 3_4), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), "allen"),
((2_1, 2_3), "##nl"),
((2_3, 2_4), "##p"),
((2_5, 3_3), "sentence"),
((3_3, 3_4), "."),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"]))
self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"])
def lowercase_ ( self) -> int:
"""simple docstring"""
a_ =["的", "人", "有"]
a_ ="".join(lowerCAmelCase_)
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
a_ =True
a_ =self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_)
a_ =self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_)
a_ =tokenizer_p.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_)
a_ =tokenizer_r.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_)
a_ =tokenizer_r.convert_ids_to_tokens(lowerCAmelCase_)
a_ =tokenizer_p.convert_ids_to_tokens(lowerCAmelCase_)
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_)
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_)
a_ =False
a_ =self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_)
a_ =self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_)
a_ =tokenizer_r.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_)
a_ =tokenizer_p.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_)
a_ =tokenizer_r.convert_ids_to_tokens(lowerCAmelCase_)
a_ =tokenizer_p.convert_ids_to_tokens(lowerCAmelCase_)
# it is expected that only the first Chinese character is not preceded by "##".
a_ =[
f"""##{token}""" if idx != 0 else token for idx, token in enumerate(lowerCAmelCase_)
]
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_)
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_)
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''',
}
class AlbertConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = "albert"
    def __init__(self, vocab_size=30000, embedding_size=128, hidden_size=4096, num_hidden_layers=12, num_hidden_groups=1, num_attention_heads=64, intermediate_size=16384, inner_group_num=1, hidden_act="gelu_new", hidden_dropout_prob=0, attention_probs_dropout_prob=0, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, classifier_dropout_prob=0.1, position_embedding_type="absolute", pad_token_id=0, bos_token_id=2, eos_token_id=3, **kwargs, ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type
class AlbertOnnxConfig(OnnxConfig):
    '''simple docstring'''
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
])
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict, encoder_only=False):
    '''simple docstring'''
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if encoder_only and not key.startswith("head"):
            key = "segformer.encoder." + key
        if key.startswith("backbone"):
            key = key.replace("backbone", "segformer.encoder")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"""patch_embed{idx}""", f"""patch_embeddings.{int(idx)-1}""")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "segformer.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("segformer.encoder.layer_norm") + len("segformer.encoder.layer_norm")]
            key = key.replace(f"""layer_norm{idx}""", f"""layer_norm.{int(idx)-1}""")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"""block{idx}""", f"""block.{int(idx)-1}""")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"""linear_c{idx}""", f"""linear_c.{int(idx)-1}""")
        if key.startswith("head"):
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict
def read_in_k_v(state_dict, config):
    '''simple docstring'''
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"""segformer.encoder.block.{i}.{j}.attention.self.kv.weight""")
            kv_bias = state_dict.pop(f"""segformer.encoder.block.{i}.{j}.attention.self.kv.bias""")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"""segformer.encoder.block.{i}.{j}.attention.self.key.weight"""] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"""segformer.encoder.block.{i}.{j}.attention.self.key.bias"""] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"""segformer.encoder.block.{i}.{j}.attention.self.value.weight"""] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"""segformer.encoder.block.{i}.{j}.attention.self.value.bias"""] = kv_bias[
                config.hidden_sizes[i] :
            ]
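# Note: the original checkpoint fuses key and value into a single matrix of
# shape (2 * hidden_size, hidden_size); the split above assigns the first
# hidden_size rows to the key projection and the remaining rows to the value.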
def prepare_img():
    '''simple docstring'''
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_segformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
'''simple docstring'''
a_ =SegformerConfig()
a_ =False
# set attributes based on model_name
a_ ="huggingface/label-files"
if "segformer" in model_name:
a_ =model_name[len("segformer." ) : len("segformer." ) + 2]
if "ade" in model_name:
a_ =1_5_0
a_ ="ade20k-id2label.json"
a_ =(1, 1_5_0, 1_2_8, 1_2_8)
elif "city" in model_name:
a_ =1_9
a_ ="cityscapes-id2label.json"
a_ =(1, 1_9, 1_2_8, 1_2_8)
else:
raise ValueError(F"""Model {model_name} not supported""" )
elif "mit" in model_name:
a_ =True
a_ =model_name[4:6]
a_ =1_0_0_0
a_ ="imagenet-1k-id2label.json"
a_ =(1, 1_0_0_0)
else:
raise ValueError(F"""Model {model_name} not supported""" )
# set config attributes
a_ =json.load(open(hf_hub_download(lowercase__ , lowercase__ , repo_type="dataset" ) , "r" ) )
a_ ={int(lowercase__ ): v for k, v in idalabel.items()}
a_ =idalabel
a_ ={v: k for k, v in idalabel.items()}
if size == "b0":
pass
elif size == "b1":
a_ =[6_4, 1_2_8, 3_2_0, 5_1_2]
a_ =2_5_6
elif size == "b2":
a_ =[6_4, 1_2_8, 3_2_0, 5_1_2]
a_ =7_6_8
a_ =[3, 4, 6, 3]
elif size == "b3":
a_ =[6_4, 1_2_8, 3_2_0, 5_1_2]
a_ =7_6_8
a_ =[3, 4, 1_8, 3]
elif size == "b4":
a_ =[6_4, 1_2_8, 3_2_0, 5_1_2]
a_ =7_6_8
a_ =[3, 8, 2_7, 3]
elif size == "b5":
a_ =[6_4, 1_2_8, 3_2_0, 5_1_2]
a_ =7_6_8
a_ =[3, 6, 4_0, 3]
else:
raise ValueError(F"""Size {size} not supported""" )
# load image processor (only resize + normalize)
a_ =SegformerImageProcessor(
image_scale=(5_1_2, 5_1_2) , keep_ratio=lowercase__ , align=lowercase__ , do_random_crop=lowercase__ )
# prepare image
a_ =prepare_img()
a_ =image_processor(images=lowercase__ , return_tensors="pt" ).pixel_values
logger.info(F"""Converting model {model_name}...""" )
# load original state dict
if encoder_only:
a_ =torch.load(lowercase__ , map_location=torch.device("cpu" ) )
else:
a_ =torch.load(lowercase__ , map_location=torch.device("cpu" ) )["state_dict"]
# rename keys
a_ =rename_keys(lowercase__ , encoder_only=lowercase__ )
if not encoder_only:
del state_dict["decode_head.conv_seg.weight"]
del state_dict["decode_head.conv_seg.bias"]
# key and value matrices need special treatment
read_in_k_v(lowercase__ , lowercase__ )
# create HuggingFace model and load state dict
if encoder_only:
a_ =False
a_ =SegformerForImageClassification(lowercase__ )
else:
a_ =SegformerForSemanticSegmentation(lowercase__ )
model.load_state_dict(lowercase__ )
model.eval()
# forward pass
a_ =model(lowercase__ )
a_ =outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
a_ =torch.tensor(
[
[[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
[[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
[[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
a_ =torch.tensor(
[
[[-7.5820, -8.7231, -8.3215], [-8.0600, -10.3529, -10.0304], [-7.5208, -9.4103, -9.6239]],
[[-12.6918, -13.8994, -13.7137], [-13.3196, -15.7523, -15.4789], [-12.9343, -14.8757, -14.9689]],
[[-11.1911, -11.9421, -11.3243], [-11.3342, -13.6839, -13.3581], [-10.3909, -12.1832, -12.4858]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
a_ =torch.tensor(
[
[[-11.8173, -14.3850, -16.3128], [-14.5648, -16.5804, -18.6568], [-14.7223, -15.7387, -18.4218]],
[[-15.7290, -17.9171, -19.4423], [-18.3105, -19.9448, -21.4661], [-17.9296, -18.6497, -20.7910]],
[[-15.0783, -17.0336, -18.2789], [-16.8771, -18.6870, -20.1612], [-16.2454, -17.1426, -19.5055]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
a_ =torch.tensor(
[
[[-9.0878, -10.2081, -10.1891], [-9.3144, -10.7941, -10.9843], [-9.2294, -10.3855, -10.5704]],
[[-12.2316, -13.9068, -13.6102], [-12.9161, -14.3702, -14.3235], [-12.5233, -13.7174, -13.7932]],
[[-14.6275, -15.2490, -14.9727], [-14.3400, -15.9687, -16.2827], [-14.1484, -15.4033, -15.8937]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
a_ =torch.tensor(
[
[[-12.3144, -13.2447, -14.0802], [-13.3614, -14.5816, -15.6117], [-13.3340, -14.4433, -16.2219]],
[[-19.2781, -20.4128, -20.7506], [-20.6153, -21.6566, -22.0998], [-19.9800, -21.0430, -22.1494]],
[[-18.8739, -19.7804, -21.1834], [-20.1233, -21.6765, -23.2944], [-20.0315, -21.2641, -23.6944]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
a_ =torch.tensor(
[
[[-9.5524, -12.0835, -11.7348], [-10.5229, -13.6446, -14.5662], [-9.5842, -12.8851, -13.9414]],
[[-15.3432, -17.5323, -17.0818], [-16.3330, -18.9255, -19.2101], [-15.1340, -17.7848, -18.3971]],
[[-12.6072, -14.9486, -14.6631], [-13.7629, -17.0907, -17.7745], [-12.7899, -16.1695, -17.1671]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
a_ =torch.tensor(
[
[[-11.9295, -13.4057, -14.8106], [-13.3431, -14.8179, -15.3781], [-14.2836, -15.5942, -16.1588]],
[[-11.4906, -12.8067, -13.6564], [-13.1189, -14.0500, -14.1543], [-13.8748, -14.5136, -14.8789]],
[[0.5374, 0.1067, -0.4742], [0.1141, -0.2255, -0.7099], [-0.3000, -0.5924, -1.3105]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
a_ =torch.tensor(
[
[[-7.8217, -9.8767, -10.1717], [-9.4438, -10.9058, -11.4047], [-9.7939, -12.3495, -12.1079]],
[[-7.1514, -9.5336, -10.0860], [-9.7776, -11.6822, -11.8439], [-10.1411, -12.7655, -12.8972]],
[[0.3021, 0.0805, -0.2310], [-0.0328, -0.1605, -0.2714], [-0.1408, -0.5477, -0.6976]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
a_ =torch.tensor(
[
[
[-1.1372E01, -1.2787E01, -1.3477E01],
[-1.2536E01, -1.4194E01, -1.4409E01],
[-1.3217E01, -1.4888E01, -1.5327E01],
],
[
[-1.4791E01, -1.7122E01, -1.8277E01],
[-1.7163E01, -1.9192E01, -1.9533E01],
[-1.7897E01, -1.9991E01, -2.0315E01],
],
[
[7.6723E-01, 4.1921E-01, -7.7878E-02],
[4.7772E-01, 9.5557E-03, -2.8082E-01],
[3.6032E-01, -2.4826E-01, -5.1168E-01],
],
] )
elif model_name == "segformer.b0.768x768.city.160k":
a_ =torch.tensor(
[
[[-9.4959, -11.3087, -11.7479], [-11.0025, -12.6540, -12.3319], [-11.4064, -13.0487, -12.9905]],
[[-9.8905, -11.3084, -12.0854], [-11.1726, -12.7698, -12.9583], [-11.5985, -13.3278, -14.1774]],
[[0.2213, 0.0192, -0.2466], [-0.1731, -0.4213, -0.4874], [-0.3126, -0.6541, -1.1389]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
a_ =torch.tensor(
[
[[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
[[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
[[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
a_ =torch.tensor(
[
[[-16.0976, -16.4856, -17.3962], [-16.6234, -19.0342, -19.7685], [-16.0900, -18.0661, -19.1180]],
[[-18.4750, -18.8488, -19.5074], [-19.4030, -22.1570, -22.5977], [-19.1191, -20.8486, -22.3783]],
[[-4.5178, -5.5037, -6.5109], [-5.0884, -7.2174, -8.0334], [-4.4156, -5.8117, -7.2970]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
a_ =torch.tensor(
[
[[-14.2081, -14.4732, -14.1977], [-14.5867, -16.4423, -16.6356], [-13.4441, -14.9685, -16.8696]],
[[-14.4576, -14.7073, -15.0451], [-15.0816, -17.6237, -17.9873], [-14.4213, -16.0199, -18.5992]],
[[-4.7349, -4.9588, -5.0966], [-4.3210, -6.9325, -7.2591], [-3.4312, -4.7484, -7.1917]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
a_ =torch.tensor(
[
[[-11.7737, -11.9526, -11.3273], [-13.6692, -14.4574, -13.8878], [-13.8937, -14.6924, -15.9345]],
[[-14.6706, -14.5330, -14.1306], [-16.1502, -16.8180, -16.4269], [-16.8338, -17.8939, -20.1746]],
[[1.0491, 0.8289, 1.0310], [1.1044, 0.5219, 0.8055], [1.0899, 0.6926, 0.5590]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
a_ =torch.tensor(
[
[[-12.5641, -13.4777, -13.0684], [-13.9587, -15.8983, -16.6557], [-13.3109, -15.7350, -16.3141]],
[[-14.7074, -15.4352, -14.5944], [-16.6353, -18.1663, -18.6120], [-15.1702, -18.0329, -18.1547]],
[[-1.7990, -2.0951, -1.7784], [-2.6397, -3.8245, -3.9686], [-1.5264, -2.8126, -2.9316]],
] )
else:
a_ =logits.argmax(-1 ).item()
print("Predicted class:" , model.config.idalabel[predicted_class_idx] )
# verify logits
if not encoder_only:
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3, :3, :3] , lowercase__ , atol=1E-2 )
# finally, save model and image processor
logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(lowercase__ ).mkdir(exist_ok=lowercase__ )
model.save_pretrained(lowercase__ )
image_processor.save_pretrained(lowercase__ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''segformer.b0.512x512.ade.160k''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
    args = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
'''simple docstring'''
from collections.abc import Sequence
def max_subsequence_sum(nums: Sequence[int] | None = None) -> int:
    '''simple docstring'''
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")
    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        ans = max(ans, ans + num, num)
    return ans
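# Because elements of a subsequence need not be contiguous, the optimum simply
# accumulates every positive element; e.g. for [-2, 1, -3, 4, -1, 2, 1, -5, 4]
# the answer is 1 + 4 + 2 + 1 + 4 = 12.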
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
    n = int(input("Enter number of elements : ").strip())
    array = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
print(max_subsequence_sum(array))
'''simple docstring'''
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
'''torch''',
'''numpy''',
'''tokenizers''',
'''filelock''',
'''requests''',
'''tqdm''',
'''regex''',
'''sentencepiece''',
'''sacremoses''',
'''importlib_metadata''',
'''huggingface_hub''',
]
@add_start_docstrings(AutoConfig.__doc__ )
def config(*args, **kwargs):
    '''simple docstring'''
    return AutoConfig.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoTokenizer.__doc__ )
def tokenizer(*args, **kwargs):
    '''simple docstring'''
    return AutoTokenizer.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoModel.__doc__ )
def model(*args, **kwargs):
    '''simple docstring'''
    return AutoModel.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def modelForCausalLM(*args, **kwargs):
    '''simple docstring'''
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def modelForMaskedLM(*args, **kwargs):
    '''simple docstring'''
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def modelForSequenceClassification(*args, **kwargs):
    '''simple docstring'''
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def modelForQuestionAnswering(*args, **kwargs):
    '''simple docstring'''
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
'''simple docstring'''
import os
from math import log10
def solution(data_file: str = "base_exp.txt") -> int:
    '''simple docstring'''
    largest = 0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        a, x = list(map(int, line.split(",")))
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1
    return result
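# Why logarithms: a**b > c**d exactly when b*log10(a) > d*log10(c), which
# avoids computing astronomically large powers. E.g. 3**7 = 2187 beats
# 2**11 = 2048 because 7*log10(3) ~ 3.34 exceeds 11*log10(2) ~ 3.31.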
if __name__ == "__main__":
print(solution())
'''simple docstring'''
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
pytestmark = pytest.mark.integration
REQUIRE_FAIRSEQ = {'''comet'''}
_has_fairseq = importlib.util.find_spec('''fairseq''') is not None
UNSUPPORTED_ON_WINDOWS = {'''code_eval'''}
_on_windows = os.name == '''nt'''
REQUIRE_TRANSFORMERS = {'''bertscore''', '''frugalscore''', '''perplexity'''}
_has_transformers = importlib.util.find_spec('''transformers''') is not None
def skip_if_metric_requires_fairseq(test_case):
    '''simple docstring'''
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
            self.skipTest('"test requires Fairseq"')
        else:
            test_case(self, metric_name)
    return wrapper
def skip_if_metric_requires_transformers(test_case):
    '''simple docstring'''
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
            self.skipTest('"test requires transformers"')
        else:
            test_case(self, metric_name)
    return wrapper
def skip_on_windows(test_case):
    '''simple docstring'''
    @wraps(test_case)
    def wrapper(self, metric_name):
        if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
            self.skipTest('"test not supported on Windows"')
        else:
            test_case(self, metric_name)
    return wrapper
def get_local_metric_names():
    '''simple docstring'''
    metrics = [metric_dir.split(os.sep)[-2] for metric_dir in glob.glob("./metrics/*/")]
    return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"]  # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names())
@for_all_test_methods(
    skip_if_metric_requires_fairseq, skip_if_metric_requires_transformers, skip_on_windows)
@local
class LocalMetricTest(parameterized.TestCase):
'''simple docstring'''
    INTENSIVE_CALLS_PATCHER = {}
    metric_name = None
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning")
def lowercase_ ( self , lowerCAmelCase_) -> List[str]:
"""simple docstring"""
a_ ="[...]"
a_ =importlib.import_module(
datasets.load.metric_module_factory(os.path.join("metrics" , lowerCAmelCase_)).module_path)
a_ =datasets.load.import_main_class(metric_module.__name__ , dataset=lowerCAmelCase_)
# check parameters
a_ =inspect.signature(metric._compute).parameters
self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values())) # no **kwargs
# run doctest
with self.patch_intensive_calls(lowerCAmelCase_ , metric_module.__name__):
with self.use_local_metrics():
try:
a_ =doctest.testmod(lowerCAmelCase_ , verbose=lowerCAmelCase_ , raise_on_error=lowerCAmelCase_)
except doctest.UnexpectedException as e:
raise e.exc_info[1] # raise the exception that doctest caught
self.assertEqual(results.failed , 0)
self.assertGreater(results.attempted , 1)
@slow
def lowercase_ ( self , lowerCAmelCase_) -> Tuple:
"""simple docstring"""
a_ ="[...]"
a_ =importlib.import_module(
datasets.load.metric_module_factory(os.path.join("metrics" , lowerCAmelCase_)).module_path)
# run doctest
with self.use_local_metrics():
a_ =doctest.testmod(lowerCAmelCase_ , verbose=lowerCAmelCase_ , raise_on_error=lowerCAmelCase_)
self.assertEqual(results.failed , 0)
self.assertGreater(results.attempted , 1)
@contextmanager
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_) -> Tuple:
"""simple docstring"""
if metric_name in self.INTENSIVE_CALLS_PATCHER:
with self.INTENSIVE_CALLS_PATCHER[metric_name](lowerCAmelCase_):
yield
else:
yield
@contextmanager
def lowercase_ ( self) -> Optional[Any]:
"""simple docstring"""
def load_local_metric(lowerCAmelCase_ , *lowerCAmelCase_ , **lowerCAmelCase_):
return load_metric(os.path.join("metrics" , lowerCAmelCase_) , *lowerCAmelCase_ , **lowerCAmelCase_)
with patch("datasets.load_metric") as mock_load_metric:
a_ =load_local_metric
yield
@classmethod
def lowercase_ ( cls , lowerCAmelCase_) -> List[Any]:
"""simple docstring"""
def wrapper(lowerCAmelCase_):
a_ =contextmanager(lowerCAmelCase_)
a_ =patcher
return patcher
return wrapper
@LocalMetricTest.register_intensive_calls_patcher("bleurt" )
def patch_bleurt(module_name):
'''simple docstring'''
import tensorflow.compat.va as tf
from bleurt.score import Predictor
tf.flags.DEFINE_string("sv" , "" , "" ) # handle pytest cli flags
    class MockedPredictor(Predictor):
        '''simple docstring'''
        def predict(self, input_dict):
            """simple docstring"""
            assert len(input_dict["input_ids"]) == 2
            return np.array([1.03, 1.04])
# mock predict_fn which is supposed to do a forward pass with a bleurt model
with patch("bleurt.score._create_predictor" ) as mock_create_predictor:
        mock_create_predictor.return_value = MockedPredictor()
yield
@LocalMetricTest.register_intensive_calls_patcher("bertscore" )
def patch_bertscore(module_name):
'''simple docstring'''
import torch
    def bert_cos_score_idf(model, refs, *args, **kwargs):
        return torch.tensor([[1.0, 1.0, 1.0]] * len(refs))
# mock get_model which is supposed to do download a bert model
# mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
with patch("bert_score.scorer.get_model" ), patch(
"bert_score.scorer.bert_cos_score_idf" ) as mock_bert_cos_score_idf:
        mock_bert_cos_score_idf.side_effect = bert_cos_score_idf
yield
@LocalMetricTest.register_intensive_calls_patcher("comet" )
def patch_comet(module_name):
'''simple docstring'''
    def load_from_checkpoint(model_path):
        class Model:
            '''simple docstring'''
            def predict(self, data, *args, **kwargs):
                """simple docstring"""
                assert len(data) == 2
                scores = [0.19, 0.92]
                return scores, sum(scores) / len(scores)
        return Model()
# mock load_from_checkpoint which is supposed to do download a bert model
# mock load_from_checkpoint which is supposed to do download a bert model
with patch("comet.download_model" ) as mock_download_model:
a_ =None
with patch("comet.load_from_checkpoint" ) as mock_load_from_checkpoint:
a_ =load_from_checkpoint
yield
def test_seqeval_raises_when_incorrect_scheme():
    metric = load_metric(os.path.join("metrics", "seqeval"))
    wrong_scheme = "ERROR"
    error_message = f"Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"
    with pytest.raises(ValueError, match=re.escape(error_message)):
        metric.compute(predictions=[], references=[], scheme=wrong_scheme)
| 41
|
'''simple docstring'''
from __future__ import annotations
def extended_euclid(a: int, b: int) -> tuple[int, int]:
    """Return (x, y) such that a * x + b * y == gcd(a, b)."""
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    """Solve x ≡ r1 (mod n1), x ≡ r2 (mod n2) for coprime n1 and n2."""
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


def invert_modulo(a: int, n: int) -> int:
    """Return the multiplicative inverse of a modulo n."""
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    """Same as chinese_remainder_theorem, but built on invert_modulo."""
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name='''chinese_remainder_theorem''', verbose=True)
testmod(name='''chinese_remainder_theorem2''', verbose=True)
testmod(name='''invert_modulo''', verbose=True)
testmod(name='''extended_euclid''', verbose=True)
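    # Worked example (a sketch): x ≡ 1 (mod 5), x ≡ 3 (mod 7) has the unique
    # solution 31 modulo 35, and both implementations agree on it:
    assert chinese_remainder_theorem(5, 1, 7, 3) == chinese_remainder_theorem2(5, 1, 7, 3) == 31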
| 41
| 1
|
'''simple docstring'''
def least_divisible_repunit(divisor: int) -> int:
    """Return the digit count of the smallest repunit divisible by divisor (0 if none exists)."""
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index


def solution(limit: int = 1_000_000) -> int:
    """Return the first odd divisor whose least divisible repunit is longer than limit digits."""
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor
if __name__ == "__main__":
print(F"""{solution() = }""")
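    # Sanity sketch: 111111 = 7 * 15873, so the smallest repunit divisible
    # by 7 has six digits.
    assert least_divisible_repunit(7) == 6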
| 41
|
'''simple docstring'''
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray) -> bool:
    """Check whether a matrix equals its own conjugate transpose."""
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Return the Rayleigh quotient v* A v / (v* v) for a Hermitian matrix a."""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))
    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
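    # Note (a sketch of the math): for the second matrix above, v* A v = 42 and
    # v* v = 14, so the Rayleigh quotient is exactly 42 / 14 = 3, matching the
    # final assertion in tests().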
| 41
| 1
|
'''simple docstring'''
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class UpperCAmelCase ( unittest.TestCase):
'''simple docstring'''
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ = True , lowerCAmelCase_ = None , lowerCAmelCase_ = 3_2 , lowerCAmelCase_ = True , lowerCAmelCase_ = 1 / 2_5_5 , lowerCAmelCase_ = True , lowerCAmelCase_ = True , lowerCAmelCase_ = [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] , lowerCAmelCase_ = [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] , lowerCAmelCase_ = True , lowerCAmelCase_=7 , lowerCAmelCase_=3_0 , lowerCAmelCase_=4_0_0 , lowerCAmelCase_=3 , ) -> str:
"""simple docstring"""
a_ =parent
a_ =do_resize
a_ =size if size is not None else {"shortest_edge": 2_8_8}
a_ =size_divisor
a_ =do_rescale
a_ =rescale_factor
a_ =do_normalize
a_ =do_center_crop
a_ =image_mean
a_ =image_std
a_ =do_pad
a_ =batch_size
a_ =num_channels
a_ =min_resolution
a_ =max_resolution
def lowercase_ ( self) -> int:
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_=False) -> str:
"""simple docstring"""
if not batched:
a_ =self.size["shortest_edge"]
a_ =image_inputs[0]
if isinstance(lowerCAmelCase_ , Image.Image):
a_ , a_ =image.size
else:
a_ , a_ =image.shape[1], image.shape[2]
a_ =size / min(lowerCAmelCase_ , lowerCAmelCase_)
if h < w:
a_ , a_ =size, scale * w
else:
a_ , a_ =scale * h, size
a_ =int((1_3_3_3 / 8_0_0) * size)
if max(lowerCAmelCase_ , lowerCAmelCase_) > max_size:
a_ =max_size / max(lowerCAmelCase_ , lowerCAmelCase_)
a_ =newh * scale
a_ =neww * scale
a_ , a_ =int(newh + 0.5), int(neww + 0.5)
a_ , a_ =(
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
a_ =[]
for image in image_inputs:
a_ , a_ =self.get_expected_values([image])
expected_values.append((expected_height, expected_width))
a_ =max(lowerCAmelCase_ , key=lambda lowerCAmelCase_: item[0])[0]
a_ =max(lowerCAmelCase_ , key=lambda lowerCAmelCase_: item[1])[1]
return expected_height, expected_width
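    # Worked example (a sketch, assuming the snap to size_divisor applies after
    # the optional max_size rescale): an image with shorter side 400 and longer
    # side 600 and shortest_edge = 288 scales to 288 x 432; 432 is below the
    # cap int(1333 / 800 * 288) = 479, and rounding both sides down to
    # multiples of size_divisor = 32 gives an expected output of 288 x 416.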
@require_torch
@require_vision
class UpperCAmelCase ( __a , unittest.TestCase):
'''simple docstring'''
__magic_name__ : Optional[Any] = BridgeTowerImageProcessor if is_vision_available() else None
def lowercase_ ( self) -> List[Any]:
"""simple docstring"""
a_ =BridgeTowerImageProcessingTester(self)
@property
def lowercase_ ( self) -> List[Any]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase_ ( self) -> List[str]:
"""simple docstring"""
a_ =self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(lowerCAmelCase_ , "image_mean"))
self.assertTrue(hasattr(lowerCAmelCase_ , "image_std"))
self.assertTrue(hasattr(lowerCAmelCase_ , "do_normalize"))
self.assertTrue(hasattr(lowerCAmelCase_ , "do_resize"))
self.assertTrue(hasattr(lowerCAmelCase_ , "size"))
self.assertTrue(hasattr(lowerCAmelCase_ , "size_divisor"))
def lowercase_ ( self) -> str:
"""simple docstring"""
pass
def lowercase_ ( self) -> Optional[Any]:
"""simple docstring"""
a_ =self.image_processing_class(**self.image_processor_dict)
# create random PIL images
a_ =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase_)
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase_ , Image.Image)
# Test not batched input
a_ =image_processing(image_inputs[0] , return_tensors="pt").pixel_values
a_ , a_ =self.image_processor_tester.get_expected_values(lowerCAmelCase_)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a_ =image_processing(lowerCAmelCase_ , return_tensors="pt").pixel_values
a_ , a_ =self.image_processor_tester.get_expected_values(lowerCAmelCase_ , batched=lowerCAmelCase_)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowercase_ ( self) -> List[Any]:
"""simple docstring"""
a_ =self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
a_ =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase_ , numpify=lowerCAmelCase_)
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase_ , np.ndarray)
# Test not batched input
a_ =image_processing(image_inputs[0] , return_tensors="pt").pixel_values
a_ , a_ =self.image_processor_tester.get_expected_values(lowerCAmelCase_)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a_ =image_processing(lowerCAmelCase_ , return_tensors="pt").pixel_values
a_ , a_ =self.image_processor_tester.get_expected_values(lowerCAmelCase_ , batched=lowerCAmelCase_)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowercase_ ( self) -> Optional[Any]:
"""simple docstring"""
a_ =self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
a_ =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase_ , torchify=lowerCAmelCase_)
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase_ , torch.Tensor)
# Test not batched input
a_ =image_processing(image_inputs[0] , return_tensors="pt").pixel_values
a_ , a_ =self.image_processor_tester.get_expected_values(lowerCAmelCase_)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a_ =image_processing(lowerCAmelCase_ , return_tensors="pt").pixel_values
a_ , a_ =self.image_processor_tester.get_expected_values(lowerCAmelCase_ , batched=lowerCAmelCase_)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
| 41
|
'''simple docstring'''
from __future__ import annotations
solution = []


def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    """Return True if a queen can be placed at (row, column) without being attacked."""
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    """Place queens row by row with backtracking, printing every complete placement."""
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board: list[list[int]]) -> None:
    """Print the board with Q for a queen and . for an empty square."""
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()


# n = int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('''The total no. of solutions are :''', len(solution))
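# Note (a sketch): for the classic n = 8 board the backtracking search above
# reaches 92 complete placements, so the printed total is 92.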
| 41
| 1
|
'''simple docstring'''
from typing import List
from .keymap import KEYMAP, get_character
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
def decorator(lowercase__ ):
a_ =getattr(lowercase__ , "handle_key" , [] )
handle += [key]
setattr(lowercase__ , "handle_key" , lowercase__ )
return func
return decorator
def UpperCAmelCase_ ( *lowercase__ ):
'''simple docstring'''
def decorator(lowercase__ ):
a_ =getattr(lowercase__ , "handle_key" , [] )
handle += keys
setattr(lowercase__ , "handle_key" , lowercase__ )
return func
return decorator
class UpperCAmelCase ( __a):
'''simple docstring'''
def __new__( cls , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) -> Optional[int]:
"""simple docstring"""
a_ =super().__new__(cls , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
if not hasattr(lowerCAmelCase_ , "key_handler"):
setattr(lowerCAmelCase_ , "key_handler" , {})
setattr(lowerCAmelCase_ , "handle_input" , KeyHandler.handle_input)
for value in attrs.values():
a_ =getattr(lowerCAmelCase_ , "handle_key" , [])
for key in handled_keys:
a_ =value
return new_cls
@staticmethod
def lowercase_ ( cls) -> Optional[int]:
"""simple docstring"""
a_ =get_character()
if char != KEYMAP["undefined"]:
a_ =ord(lowerCAmelCase_)
a_ =cls.key_handler.get(lowerCAmelCase_)
if handler:
a_ =char
return handler(cls)
else:
return None
def UpperCAmelCase_ ( cls ):
'''simple docstring'''
return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
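# Usage sketch (hypothetical names; the two decorators above are the single-key
# and multi-key registration helpers, renamed in this dump):
#
# class Menu(metaclass=KeyHandler):
#     @mark(KEYMAP["up"])
#     def move_up(self):
#         ...
#
# Menu.handle_input() then looks up the pressed key in key_handler and
# dispatches to the registered method.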
| 41
|
'''simple docstring'''
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def fill_mask(masked_input, model, tokenizer, topk=5):
    """Return the topk (filled_sentence, probability, token) triples for one <mask>."""
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))]
    )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
    return topk_filled_outputs


tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()
masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
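# Each entry of the returned list is a (filled_sentence, probability, token)
# triple, so the call above prints the three most likely completions of the
# masked French sentence (the actual tokens depend on the model weights).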
| 41
| 1
|
'''simple docstring'''
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    """Return the prime factors of n in ascending order via trial division."""
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
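    # Sanity sketch: 360 = 2**3 * 3**2 * 5, so the trial division above yields
    # the factors in ascending order.
    assert prime_factors(360) == [2, 2, 2, 3, 3, 5]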
| 41
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowercase = {
'''configuration_rag''': ['''RagConfig'''],
'''retrieval_rag''': ['''RagRetriever'''],
'''tokenization_rag''': ['''RagTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
'''RagModel''',
'''RagPreTrainedModel''',
'''RagSequenceForGeneration''',
'''RagTokenForGeneration''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
'''TFRagModel''',
'''TFRagPreTrainedModel''',
'''TFRagSequenceForGeneration''',
'''TFRagTokenForGeneration''',
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
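# When type checkers are not running, the module is replaced by a _LazyModule
# that resolves names on first attribute access, deferring the heavy torch/tf
# imports. A minimal sketch of the idea (not the actual implementation):
#
# import importlib
#
# class LazyModule:
#     def __getattr__(self, name):
#         submodule = importlib.import_module(".modeling_rag", package=__package__)
#         return getattr(submodule, name)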
| 41
| 1
|
'''simple docstring'''
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class UpperCAmelCase ( __a):
'''simple docstring'''
def __init__( self) -> List[Any]:
"""simple docstring"""
a_ =[]
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_) -> int:
"""simple docstring"""
self.events.append("on_init_end")
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_) -> List[str]:
"""simple docstring"""
self.events.append("on_train_begin")
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_) -> Dict:
"""simple docstring"""
self.events.append("on_train_end")
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_) -> Tuple:
"""simple docstring"""
self.events.append("on_epoch_begin")
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_) -> List[Any]:
"""simple docstring"""
self.events.append("on_epoch_end")
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_) -> Optional[int]:
"""simple docstring"""
self.events.append("on_step_begin")
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_) -> Any:
"""simple docstring"""
self.events.append("on_step_end")
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_) -> List[Any]:
"""simple docstring"""
self.events.append("on_evaluate")
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_) -> Tuple:
"""simple docstring"""
self.events.append("on_predict")
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_) -> str:
"""simple docstring"""
self.events.append("on_save")
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_) -> Optional[Any]:
"""simple docstring"""
self.events.append("on_log")
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_) -> List[Any]:
"""simple docstring"""
self.events.append("on_prediction_step")
@require_torch
class UpperCAmelCase ( unittest.TestCase):
'''simple docstring'''
def lowercase_ ( self) -> Union[str, Any]:
"""simple docstring"""
a_ =tempfile.mkdtemp()
def lowercase_ ( self) -> Any:
"""simple docstring"""
shutil.rmtree(self.output_dir)
def lowercase_ ( self , lowerCAmelCase_=0 , lowerCAmelCase_=0 , lowerCAmelCase_=6_4 , lowerCAmelCase_=6_4 , lowerCAmelCase_=None , lowerCAmelCase_=False , **lowerCAmelCase_) -> Union[str, Any]:
"""simple docstring"""
a_ =RegressionDataset(length=lowerCAmelCase_)
a_ =RegressionDataset(length=lowerCAmelCase_)
a_ =RegressionModelConfig(a=lowerCAmelCase_ , b=lowerCAmelCase_)
a_ =RegressionPreTrainedModel(lowerCAmelCase_)
a_ =TrainingArguments(self.output_dir , disable_tqdm=lowerCAmelCase_ , report_to=[] , **lowerCAmelCase_)
return Trainer(
lowerCAmelCase_ , lowerCAmelCase_ , train_dataset=lowerCAmelCase_ , eval_dataset=lowerCAmelCase_ , callbacks=lowerCAmelCase_ , )
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_) -> Optional[Any]:
"""simple docstring"""
self.assertEqual(len(lowerCAmelCase_) , len(lowerCAmelCase_))
# Order doesn't matter
a_ =sorted(lowerCAmelCase_ , key=lambda lowerCAmelCase_: cb.__name__ if isinstance(lowerCAmelCase_ , lowerCAmelCase_) else cb.__class__.__name__)
a_ =sorted(lowerCAmelCase_ , key=lambda lowerCAmelCase_: cb.__name__ if isinstance(lowerCAmelCase_ , lowerCAmelCase_) else cb.__class__.__name__)
for cba, cba in zip(lowerCAmelCase_ , lowerCAmelCase_):
if isinstance(lowerCAmelCase_ , lowerCAmelCase_) and isinstance(lowerCAmelCase_ , lowerCAmelCase_):
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_)
elif isinstance(lowerCAmelCase_ , lowerCAmelCase_) and not isinstance(lowerCAmelCase_ , lowerCAmelCase_):
self.assertEqual(lowerCAmelCase_ , cba.__class__)
elif not isinstance(lowerCAmelCase_ , lowerCAmelCase_) and isinstance(lowerCAmelCase_ , lowerCAmelCase_):
self.assertEqual(cba.__class__ , lowerCAmelCase_)
else:
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_)
def lowercase_ ( self , lowerCAmelCase_) -> Optional[Any]:
"""simple docstring"""
a_ =["on_init_end", "on_train_begin"]
a_ =0
a_ =len(trainer.get_eval_dataloader())
a_ =["on_prediction_step"] * len(trainer.get_eval_dataloader()) + ["on_log", "on_evaluate"]
for _ in range(trainer.state.num_train_epochs):
expected_events.append("on_epoch_begin")
for _ in range(lowerCAmelCase_):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append("on_log")
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append("on_save")
expected_events.append("on_epoch_end")
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
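    # Illustration (a sketch): with one epoch of two steps, logging_steps=1 and
    # no evaluation or saving, the expected stream is on_init_end,
    # on_train_begin, on_epoch_begin, then (on_step_begin, on_step_end, on_log)
    # twice, on_epoch_end, on_log, on_train_end.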
def lowercase_ ( self) -> List[str]:
"""simple docstring"""
a_ =self.get_trainer()
a_ =DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowerCAmelCase_)
# Callbacks passed at init are added to the default callbacks
a_ =self.get_trainer(callbacks=[MyTestTrainerCallback])
expected_callbacks.append(lowerCAmelCase_)
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowerCAmelCase_)
# TrainingArguments.disable_tqdm controls whether ProgressCallback or PrinterCallback is used
a_ =self.get_trainer(disable_tqdm=lowerCAmelCase_)
a_ =DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowerCAmelCase_)
def lowercase_ ( self) -> List[Any]:
"""simple docstring"""
a_ =DEFAULT_CALLBACKS.copy() + [ProgressCallback]
a_ =self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(lowerCAmelCase_)
expected_callbacks.remove(lowerCAmelCase_)
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowerCAmelCase_)
a_ =self.get_trainer()
a_ =trainer.pop_callback(lowerCAmelCase_)
self.assertEqual(cb.__class__ , lowerCAmelCase_)
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowerCAmelCase_)
trainer.add_callback(lowerCAmelCase_)
expected_callbacks.insert(0 , lowerCAmelCase_)
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowerCAmelCase_)
# We can also add, pop, or remove by instance
a_ =self.get_trainer()
a_ =trainer.callback_handler.callbacks[0]
trainer.remove_callback(lowerCAmelCase_)
expected_callbacks.remove(lowerCAmelCase_)
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowerCAmelCase_)
a_ =self.get_trainer()
a_ =trainer.callback_handler.callbacks[0]
a_ =trainer.pop_callback(lowerCAmelCase_)
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_)
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowerCAmelCase_)
trainer.add_callback(lowerCAmelCase_)
expected_callbacks.insert(0 , lowerCAmelCase_)
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowerCAmelCase_)
def lowercase_ ( self) -> Optional[int]:
"""simple docstring"""
import warnings
# XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
warnings.simplefilter(action="ignore" , category=lowerCAmelCase_)
a_ =self.get_trainer(callbacks=[MyTestTrainerCallback])
trainer.train()
a_ =trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowerCAmelCase_ , self.get_expected_events(lowerCAmelCase_))
# Independent log/save/eval
a_ =self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5)
trainer.train()
a_ =trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowerCAmelCase_ , self.get_expected_events(lowerCAmelCase_))
a_ =self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5)
trainer.train()
a_ =trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowerCAmelCase_ , self.get_expected_events(lowerCAmelCase_))
a_ =self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy="steps")
trainer.train()
a_ =trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowerCAmelCase_ , self.get_expected_events(lowerCAmelCase_))
a_ =self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy="epoch")
trainer.train()
a_ =trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowerCAmelCase_ , self.get_expected_events(lowerCAmelCase_))
# A bit of everything
a_ =self.get_trainer(
callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=1_0 , eval_steps=5 , evaluation_strategy="steps" , )
trainer.train()
a_ =trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowerCAmelCase_ , self.get_expected_events(lowerCAmelCase_))
# warning should be emitted for duplicated callbacks
with patch("transformers.trainer_callback.logger.warning") as warn_mock:
a_ =self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
assert str(lowerCAmelCase_) in warn_mock.call_args[0][0]
| 41
|
'''simple docstring'''
import os
# Precomputes a list of the first 100 triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]


def solution() -> int:
    """Count the words in words.txt whose letter-value sum is a triangular number."""
    script_dir = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_dir, "words.txt")
    words = ""
    with open(words_file_path) as f:
        words = f.readline()
    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)
if __name__ == "__main__":
print(solution())
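    # Worked example (a sketch): ord("A") - 64 == 1, so "SKY" scores
    # 19 + 11 + 25 = 55 = t(10), a triangular word.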
| 41
| 1
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class UpperCAmelCase ( unittest.TestCase):
'''simple docstring'''
def lowercase_ ( self) -> Tuple:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def lowercase_ ( self) -> int:
"""simple docstring"""
a_ =1
a_ =3
a_ =(3_2, 3_2)
a_ =floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0)).to(lowerCAmelCase_)
return image
@property
def lowercase_ ( self) -> str:
"""simple docstring"""
torch.manual_seed(0)
a_ =UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=3_2 , )
return model
@property
def lowercase_ ( self) -> str:
"""simple docstring"""
torch.manual_seed(0)
a_ =AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def lowercase_ ( self) -> int:
"""simple docstring"""
torch.manual_seed(0)
a_ =RobertaSeriesConfig(
hidden_size=3_2 , project_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_0_0_6 , )
return RobertaSeriesModelWithTransformation(lowerCAmelCase_)
@property
def lowercase_ ( self) -> Dict:
"""simple docstring"""
def extract(*lowerCAmelCase_ , **lowerCAmelCase_):
class UpperCAmelCase :
'''simple docstring'''
def __init__( self) -> Optional[int]:
"""simple docstring"""
a_ =torch.ones([0])
def lowercase_ ( self , lowerCAmelCase_) -> Union[str, Any]:
"""simple docstring"""
self.pixel_values.to(lowerCAmelCase_)
return self
return Out()
return extract
def lowercase_ ( self) -> List[Any]:
"""simple docstring"""
a_ ="cpu" # ensure determinism for the device-dependent torch.Generator
a_ =self.dummy_cond_unet
a_ =PNDMScheduler(skip_prk_steps=lowerCAmelCase_)
a_ =self.dummy_vae
a_ =self.dummy_text_encoder
a_ =XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
a_ =7_7
a_ =self.dummy_image.to(lowerCAmelCase_)
a_ =init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
a_ =AltDiffusionImgaImgPipeline(
unet=lowerCAmelCase_ , scheduler=lowerCAmelCase_ , vae=lowerCAmelCase_ , text_encoder=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ , safety_checker=lowerCAmelCase_ , feature_extractor=self.dummy_extractor , )
a_ =VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=lowerCAmelCase_)
a_ =alt_pipe.to(lowerCAmelCase_)
alt_pipe.set_progress_bar_config(disable=lowerCAmelCase_)
a_ ="A painting of a squirrel eating a burger"
a_ =torch.Generator(device=lowerCAmelCase_).manual_seed(0)
a_ =alt_pipe(
[prompt] , generator=lowerCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=lowerCAmelCase_ , )
a_ =output.images
a_ =torch.Generator(device=lowerCAmelCase_).manual_seed(0)
a_ =alt_pipe(
[prompt] , generator=lowerCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=lowerCAmelCase_ , return_dict=lowerCAmelCase_ , )[0]
a_ =image[0, -3:, -3:, -1]
a_ =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
a_ =np.array([0.4_4_2_7, 0.3_7_3_1, 0.4_2_4_9, 0.4_9_4_1, 0.4_5_4_6, 0.4_1_4_8, 0.4_1_9_3, 0.4_6_6_6, 0.4_4_9_9])
assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5e-3
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU")
def lowercase_ ( self) -> Tuple:
"""simple docstring"""
a_ =self.dummy_cond_unet
a_ =PNDMScheduler(skip_prk_steps=lowerCAmelCase_)
a_ =self.dummy_vae
a_ =self.dummy_text_encoder
a_ =XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
a_ =7_7
a_ =self.dummy_image.to(lowerCAmelCase_)
# put models in fp16
a_ =unet.half()
a_ =vae.half()
a_ =bert.half()
# make sure here that pndm scheduler skips prk
a_ =AltDiffusionImgaImgPipeline(
unet=lowerCAmelCase_ , scheduler=lowerCAmelCase_ , vae=lowerCAmelCase_ , text_encoder=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ , safety_checker=lowerCAmelCase_ , feature_extractor=self.dummy_extractor , )
a_ =VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=lowerCAmelCase_)
a_ =alt_pipe.to(lowerCAmelCase_)
alt_pipe.set_progress_bar_config(disable=lowerCAmelCase_)
a_ ="A painting of a squirrel eating a burger"
a_ =torch.manual_seed(0)
a_ =alt_pipe(
[prompt] , generator=lowerCAmelCase_ , num_inference_steps=2 , output_type="np" , image=lowerCAmelCase_ , ).images
assert image.shape == (1, 3_2, 3_2, 3)
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU")
def lowercase_ ( self) -> List[Any]:
"""simple docstring"""
a_ =load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg")
# resize to resolution that is divisible by 8 but not 16 or 32
a_ =init_image.resize((7_6_0, 5_0_4))
a_ ="BAAI/AltDiffusion"
a_ =AltDiffusionImgaImgPipeline.from_pretrained(
lowerCAmelCase_ , safety_checker=lowerCAmelCase_ , )
pipe.to(lowerCAmelCase_)
pipe.set_progress_bar_config(disable=lowerCAmelCase_)
pipe.enable_attention_slicing()
a_ ="A fantasy landscape, trending on artstation"
a_ =torch.manual_seed(0)
a_ =pipe(
prompt=lowerCAmelCase_ , image=lowerCAmelCase_ , strength=0.7_5 , guidance_scale=7.5 , generator=lowerCAmelCase_ , output_type="np" , )
a_ =output.images[0]
a_ =image[2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert image.shape == (5_0_4, 7_6_0, 3)
a_ =np.array([0.9_3_5_8, 0.9_3_9_7, 0.9_5_9_9, 0.9_9_0_1, 1.0_0_0_0, 1.0_0_0_0, 0.9_8_8_2, 1.0_0_0_0, 1.0_0_0_0])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class UpperCAmelCase ( unittest.TestCase):
'''simple docstring'''
def lowercase_ ( self) -> Dict:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self) -> Tuple:
"""simple docstring"""
a_ =load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg")
a_ =init_image.resize((7_6_8, 5_1_2))
a_ =load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy")
a_ ="BAAI/AltDiffusion"
a_ =AltDiffusionImgaImgPipeline.from_pretrained(
lowerCAmelCase_ , safety_checker=lowerCAmelCase_ , )
pipe.to(lowerCAmelCase_)
pipe.set_progress_bar_config(disable=lowerCAmelCase_)
pipe.enable_attention_slicing()
a_ ="A fantasy landscape, trending on artstation"
a_ =torch.manual_seed(0)
a_ =pipe(
prompt=lowerCAmelCase_ , image=lowerCAmelCase_ , strength=0.7_5 , guidance_scale=7.5 , generator=lowerCAmelCase_ , output_type="np" , )
a_ =output.images[0]
assert image.shape == (5_1_2, 7_6_8, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image).max() < 1e-2
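        # Note (a sketch): in img2img pipelines, strength sets the fraction of
        # the denoising schedule applied to the noised init image, so
        # strength=0.75 runs roughly three quarters of num_inference_steps and
        # preserves some of the source layout.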
| 41
|
'''simple docstring'''
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
lowercase = logging.get_logger(__name__)
set_seed(770)
lowercase = {
'''c_attn''': '''att_proj''',
'''c_proj''': '''out_proj''',
'''c_fc''': '''in_proj''',
'''transformer.''': '''''',
'''h.''': '''layers.''',
'''ln_1''': '''layernorm_1''',
'''ln_2''': '''layernorm_2''',
'''ln_f''': '''layernorm_final''',
'''wpe''': '''position_embeds_layer''',
'''wte''': '''input_embeds_layer''',
}
lowercase = {
'''text_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''text.pt''',
},
'''coarse_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''coarse.pt''',
},
'''fine_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''fine.pt''',
},
'''text''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''text_2.pt''',
},
'''coarse''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''coarse_2.pt''',
},
'''fine''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''fine_2.pt''',
},
}
lowercase = os.path.dirname(os.path.abspath(__file__))
lowercase = os.path.join(os.path.expanduser('''~'''), '''.cache''')
lowercase = os.path.join(os.getenv('''XDG_CACHE_HOME''', default_cache_dir), '''suno''', '''bark_v0''')
def UpperCAmelCase_ ( lowercase__ , lowercase__=False ):
'''simple docstring'''
a_ =model_type
if use_small:
key += "_small"
return os.path.join(lowercase__ , REMOTE_MODEL_PATHS[key]["file_name"] )
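# Sketch: for model_type="text" with use_small=True the lookup key becomes
# "text_small", so per REMOTE_MODEL_PATHS the helper resolves to a local
# text.pt checkpoint path.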
def UpperCAmelCase_ ( lowercase__ , lowercase__ ):
'''simple docstring'''
os.makedirs(lowercase__ , exist_ok=lowercase__ )
hf_hub_download(repo_id=lowercase__ , filename=lowercase__ , local_dir=lowercase__ )
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__=False , lowercase__="text" ):
'''simple docstring'''
if model_type == "text":
a_ =BarkSemanticModel
a_ =BarkSemanticConfig
a_ =BarkSemanticGenerationConfig
elif model_type == "coarse":
a_ =BarkCoarseModel
a_ =BarkCoarseConfig
a_ =BarkCoarseGenerationConfig
elif model_type == "fine":
a_ =BarkFineModel
a_ =BarkFineConfig
a_ =BarkFineGenerationConfig
else:
raise NotImplementedError()
a_ =F"""{model_type}_small""" if use_small else model_type
a_ =REMOTE_MODEL_PATHS[model_key]
if not os.path.exists(lowercase__ ):
logger.info(F"""{model_type} model not found, downloading into `{CACHE_DIR}`.""" )
_download(model_info["repo_id"] , model_info["file_name"] )
a_ =torch.load(lowercase__ , map_location=lowercase__ )
# this is a hack
a_ =checkpoint["model_args"]
if "input_vocab_size" not in model_args:
a_ =model_args["vocab_size"]
a_ =model_args["vocab_size"]
del model_args["vocab_size"]
# convert Bark model arguments to HF Bark model arguments
a_ =model_args.pop("n_head" )
a_ =model_args.pop("n_embd" )
a_ =model_args.pop("n_layer" )
a_ =ConfigClass(**checkpoint["model_args"] )
a_ =ModelClass(config=lowercase__ )
a_ =GenerationConfigClass()
a_ =model_generation_config
a_ =checkpoint["model"]
# fixup checkpoint
a_ ="_orig_mod."
for k, v in list(state_dict.items() ):
if k.startswith(lowercase__ ):
# replace part of the key with corresponding layer name in HF implementation
a_ =k[len(lowercase__ ) :]
for old_layer_name in new_layer_name_dict:
a_ =new_k.replace(lowercase__ , new_layer_name_dict[old_layer_name] )
a_ =state_dict.pop(lowercase__ )
a_ =set(state_dict.keys() ) - set(model.state_dict().keys() )
a_ ={k for k in extra_keys if not k.endswith(".attn.bias" )}
a_ =set(model.state_dict().keys() ) - set(state_dict.keys() )
a_ ={k for k in missing_keys if not k.endswith(".attn.bias" )}
if len(lowercase__ ) != 0:
raise ValueError(F"""extra keys found: {extra_keys}""" )
if len(lowercase__ ) != 0:
raise ValueError(F"""missing keys: {missing_keys}""" )
model.load_state_dict(lowercase__ , strict=lowercase__ )
a_ =model.num_parameters(exclude_embeddings=lowercase__ )
a_ =checkpoint["best_val_loss"].item()
logger.info(F"""model loaded: {round(n_params/1E6 , 1 )}M params, {round(lowercase__ , 3 )} loss""" )
model.eval()
model.to(lowercase__ )
del checkpoint, state_dict
return model
def UpperCAmelCase_ ( lowercase__ , lowercase__=False , lowercase__="text" ):
'''simple docstring'''
if model_type not in ("text", "coarse", "fine"):
raise NotImplementedError()
a_ ="cpu" # do conversion on cpu
a_ =_get_ckpt_path(lowercase__ , use_small=lowercase__ )
a_ =_load_model(lowercase__ , lowercase__ , model_type=lowercase__ , use_small=lowercase__ )
# load bark initial model
a_ =_bark_load_model(lowercase__ , "cpu" , model_type=lowercase__ , use_small=lowercase__ )
if model_type == "text":
a_ =bark_model["model"]
if model.num_parameters(exclude_embeddings=lowercase__ ) != bark_model.get_num_params():
raise ValueError("initial and new models don't have the same number of parameters" )
# check if same output as the bark model
a_ =5
a_ =1_0
if model_type in ["text", "coarse"]:
a_ =torch.randint(2_5_6 , (batch_size, sequence_length) , dtype=torch.int )
a_ =bark_model(lowercase__ )[0]
a_ =model(lowercase__ )
# take last logits
a_ =output_new_model_total.logits[:, [-1], :]
else:
a_ =3
a_ =8
a_ =torch.randint(2_5_6 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
a_ =model(lowercase__ , lowercase__ )
a_ =bark_model(lowercase__ , lowercase__ )
a_ =output_new_model_total.logits
# any output difference should come from differences in the self-attention implementation
if output_new_model.shape != output_old_model.shape:
raise ValueError("initial and new outputs don't have the same shape" )
if (output_new_model - output_old_model).abs().max().item() > 1E-3:
raise ValueError("initial and new outputs are not equal" )
Path(lowercase__ ).mkdir(exist_ok=lowercase__ )
model.save_pretrained(lowercase__ )
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ):
'''simple docstring'''
a_ =os.path.join(lowercase__ , lowercase__ )
a_ =BarkSemanticConfig.from_pretrained(os.path.join(lowercase__ , "config.json" ) )
a_ =BarkCoarseConfig.from_pretrained(os.path.join(lowercase__ , "config.json" ) )
a_ =BarkFineConfig.from_pretrained(os.path.join(lowercase__ , "config.json" ) )
a_ =EncodecConfig.from_pretrained("facebook/encodec_24khz" )
a_ =BarkSemanticModel.from_pretrained(lowercase__ )
a_ =BarkCoarseModel.from_pretrained(lowercase__ )
a_ =BarkFineModel.from_pretrained(lowercase__ )
a_ =EncodecModel.from_pretrained("facebook/encodec_24khz" )
a_ =BarkConfig.from_sub_model_configs(
lowercase__ , lowercase__ , lowercase__ , lowercase__ )
a_ =BarkGenerationConfig.from_sub_model_configs(
semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
a_ =BarkModel(lowercase__ )
a_ =semantic
a_ =coarseAcoustic
a_ =fineAcoustic
a_ =codec
a_ =bark_generation_config
Path(lowercase__ ).mkdir(exist_ok=lowercase__ )
bark.save_pretrained(lowercase__ , repo_id=lowercase__ , push_to_hub=lowercase__ )
if __name__ == "__main__":
lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''model_type''', type=str, help='''text, coarse or fine.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--is_small''', action='''store_true''', help='''convert the small version instead of the large.''')
lowercase = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
| 41
| 1
|
'''simple docstring'''
import math
from numpy import inf
from scipy.integrate import quad
def gamma(num: float) -> float:
    """Compute the Euler gamma function of num by numerical integration."""
    if num <= 0:
        raise ValueError("math domain error")
    return quad(integrand, 0, inf, args=(num,))[0]


def integrand(x: float, z: float) -> float:
    return math.pow(x, z - 1) * math.exp(-x)
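# Sanity sketch: the integral above is Euler's gamma function, so gamma(5)
# evaluates to 4! = 24 (up to quadrature error).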
if __name__ == "__main__":
from doctest import testmod
testmod()
| 41
|
'''simple docstring'''
from __future__ import annotations
def is_9_pandigital(n: int) -> bool:
    """Check whether n uses each of the digits 1 through 9 exactly once."""
    digits = str(n)
    return len(digits) == 9 and set(digits) == set("123456789")


def solution() -> int | None:
    """Return the largest 1-9 pandigital concatenated product found by the search."""
    for base_num in range(9999, 4999, -1):
        candidate = 100002 * base_num
        if is_9_pandigital(candidate):
            return candidate
    for base_num in range(333, 99, -1):
        candidate = 1002003 * base_num
        if is_9_pandigital(candidate):
            return candidate
    return None
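# Sketch of the search: 9327 * 100002 = 932718654 concatenates 9327 with
# 2 * 9327 = 18654 and uses each digit 1-9 exactly once, so the first loop
# finds it.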
if __name__ == "__main__":
print(F"""{solution() = }""")
| 41
| 1
|
'''simple docstring'''
import argparse
import os
import re
lowercase = '''src/diffusers'''
# Pattern that looks at the indentation in a line.
lowercase = re.compile(R'''^(\s*)\S''')
# Pattern that matches `"key":" and puts `key` in group 0.
lowercase = re.compile(R'''^\s*"([^"]+)":''')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
lowercase = re.compile(R'''^\s*_import_structure\["([^"]+)"\]''')
# Pattern that matches `"key",` and puts `key` in group 0.
lowercase = re.compile(R'''^\s*"([^"]+)",\s*$''')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
lowercase = re.compile(R'''\[([^\]]+)\]''')
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
a_ =_re_indent.search(lowercase__ )
return "" if search is None else search.groups()[0]
def UpperCAmelCase_ ( lowercase__ , lowercase__="" , lowercase__=None , lowercase__=None ):
'''simple docstring'''
a_ =0
a_ =code.split("\n" )
if start_prompt is not None:
while not lines[index].startswith(lowercase__ ):
index += 1
a_ =["\n".join(lines[:index] )]
else:
a_ =[]
# We split into blocks until we get to the `end_prompt` (or the end of the block).
a_ =[lines[index]]
index += 1
while index < len(lowercase__ ) and (end_prompt is None or not lines[index].startswith(lowercase__ )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(lowercase__ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + " " ):
current_block.append(lines[index] )
blocks.append("\n".join(lowercase__ ) )
if index < len(lowercase__ ) - 1:
a_ =[lines[index + 1]]
index += 1
else:
a_ =[]
else:
blocks.append("\n".join(lowercase__ ) )
a_ =[lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(lowercase__ ) > 0:
blocks.append("\n".join(lowercase__ ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(lowercase__ ):
blocks.append("\n".join(lines[index:] ) )
return blocks
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
def _inner(lowercase__ ):
return key(lowercase__ ).lower().replace("_" , "" )
return _inner
def UpperCAmelCase_ ( lowercase__ , lowercase__=None ):
'''simple docstring'''
def noop(lowercase__ ):
return x
if key is None:
a_ =noop
# Constants are all uppercase, they go first.
a_ =[obj for obj in objects if key(lowercase__ ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
a_ =[obj for obj in objects if key(lowercase__ )[0].isupper() and not key(lowercase__ ).isupper()]
# Functions begin with a lowercase, they go last.
a_ =[obj for obj in objects if not key(lowercase__ )[0].isupper()]
a_ =ignore_underscore(lowercase__ )
return sorted(lowercase__ , key=lowercase__ ) + sorted(lowercase__ , key=lowercase__ ) + sorted(lowercase__ , key=lowercase__ )
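# Example (a sketch): sort_objects(["load_model", "RagModel", "RAG_CONSTANT"])
# gives ["RAG_CONSTANT", "RagModel", "load_model"]: constants first, then
# classes, then functions, each group sorted while ignoring underscores.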
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
def _replace(lowercase__ ):
a_ =match.groups()[0]
if "," not in imports:
return F"""[{imports}]"""
a_ =[part.strip().replace("\"" , "" ) for part in imports.split("," )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
a_ =keys[:-1]
return "[" + ", ".join([F"""\"{k}\"""" for k in sort_objects(lowercase__ )] ) + "]"
a_ =import_statement.split("\n" )
if len(lowercase__ ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
a_ =2 if lines[1].strip() == "[" else 1
a_ =[(i, _re_strip_line.search(lowercase__ ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
a_ =sort_objects(lowercase__ , key=lambda lowercase__ : x[1] )
a_ =[lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(lowercase__ ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
a_ =_re_bracket_content.sub(_replace , lines[1] )
else:
a_ =[part.strip().replace("\"" , "" ) for part in lines[1].split("," )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
a_ =keys[:-1]
a_ =get_indent(lines[1] ) + ", ".join([F"""\"{k}\"""" for k in sort_objects(lowercase__ )] )
return "\n".join(lowercase__ )
else:
# Finally we have to deal with imports fitting on one line
a_ =_re_bracket_content.sub(_replace , lowercase__ )
return import_statement
def UpperCAmelCase_ ( lowercase__ , lowercase__=True ):
'''simple docstring'''
with open(lowercase__ , "r" ) as f:
a_ =f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
a_ =split_code_in_indented_blocks(
lowercase__ , start_prompt="_import_structure = {" , end_prompt="if TYPE_CHECKING:" )
# We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(lowercase__ ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
a_ =main_blocks[block_idx]
a_ =block.split("\n" )
# Get to the start of the imports.
a_ =0
while line_idx < len(lowercase__ ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
a_ =len(lowercase__ )
else:
line_idx += 1
if line_idx >= len(lowercase__ ):
continue
# Ignore beginning and last line: they don't contain anything.
a_ ="\n".join(block_lines[line_idx:-1] )
a_ =get_indent(block_lines[1] )
# Split the internal block into blocks of indent level 1.
a_ =split_code_in_indented_blocks(lowercase__ , indent_level=lowercase__ )
# We have two categories of import key: list or _import_structure[key].append/extend
a_ =_re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
a_ =[(pattern.search(lowercase__ ).groups()[0] if pattern.search(lowercase__ ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
a_ =[(i, key) for i, key in enumerate(lowercase__ ) if key is not None]
a_ =[x[0] for x in sorted(lowercase__ , key=lambda lowercase__ : x[1] )]
# We reorder the blocks, leaving empty lines/comments where they were and sorting the rest.
a_ =0
a_ =[]
for i in range(len(lowercase__ ) ):
if keys[i] is None:
reordered_blocks.append(internal_blocks[i] )
else:
a_ =sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reordered_blocks.append(lowercase__ )
count += 1
# And we put our main block back together with its first and last line.
a_ ="\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
if code != "\n".join(lowercase__ ):
if check_only:
return True
else:
print(F"""Overwriting {file}.""" )
with open(lowercase__ , "w" ) as f:
f.write("\n".join(lowercase__ ) )
def UpperCAmelCase_ ( lowercase__=True ):
'''simple docstring'''
a_ =[]
for root, _, files in os.walk(lowercase__ ):
if "__init__.py" in files:
a_ =sort_imports(os.path.join(lowercase__ , "__init__.py" ) , check_only=lowercase__ )
if result:
a_ =[os.path.join(lowercase__ , "__init__.py" )]
if len(lowercase__ ) > 0:
raise ValueError(F"""Would overwrite {len(lowercase__ )} files, run `make style`.""" )
if __name__ == "__main__":
lowercase = argparse.ArgumentParser()
parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
lowercase = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 41
|
'''simple docstring'''
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UpperCAmelCase :
'''simple docstring'''
@property
def lowercase_ ( self) -> Any:
"""simple docstring"""
return self.get_dummy_input()
@property
def lowercase_ ( self) -> List[str]:
"""simple docstring"""
if self.block_type == "down":
return (4, 3_2, 1_6, 1_6)
elif self.block_type == "mid":
return (4, 3_2, 3_2, 3_2)
elif self.block_type == "up":
return (4, 3_2, 6_4, 6_4)
raise ValueError(f"""'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.""")
def lowercase_ ( self , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_=False , lowerCAmelCase_=False , ) -> Dict:
"""simple docstring"""
a_ =4
a_ =3_2
a_ =(3_2, 3_2)
a_ =torch.manual_seed(0)
a_ =torch.device(lowerCAmelCase_)
a_ =(batch_size, num_channels) + sizes
a_ =randn_tensor(lowerCAmelCase_ , generator=lowerCAmelCase_ , device=lowerCAmelCase_)
a_ ={"hidden_states": hidden_states}
if include_temb:
a_ =1_2_8
a_ =randn_tensor((batch_size, temb_channels) , generator=lowerCAmelCase_ , device=lowerCAmelCase_)
if include_res_hidden_states_tuple:
a_ =torch.manual_seed(1)
a_ =(randn_tensor(lowerCAmelCase_ , generator=lowerCAmelCase_ , device=lowerCAmelCase_),)
if include_encoder_hidden_states:
a_ =floats_tensor((batch_size, 3_2, 3_2)).to(lowerCAmelCase_)
if include_skip_sample:
a_ =randn_tensor(((batch_size, 3) + sizes) , generator=lowerCAmelCase_ , device=lowerCAmelCase_)
return dummy_input
def lowercase_ ( self) -> Optional[int]:
"""simple docstring"""
a_ ={
"in_channels": 3_2,
"out_channels": 3_2,
"temb_channels": 1_2_8,
}
if self.block_type == "up":
a_ =3_2
if self.block_type == "mid":
init_dict.pop("out_channels")
a_ =self.dummy_input
return init_dict, inputs_dict
def lowercase_ ( self , lowerCAmelCase_) -> Dict:
"""simple docstring"""
a_ , a_ =self.prepare_init_args_and_inputs_for_common()
a_ =self.block_class(**lowerCAmelCase_)
unet_block.to(lowerCAmelCase_)
unet_block.eval()
with torch.no_grad():
a_ =unet_block(**lowerCAmelCase_)
if isinstance(lowerCAmelCase_ , lowerCAmelCase_):
a_ =output[0]
self.assertEqual(output.shape , self.output_shape)
a_ =output[0, -1, -3:, -3:]
a_ =torch.tensor(lowerCAmelCase_).to(lowerCAmelCase_)
assert torch_all_close(output_slice.flatten() , lowerCAmelCase_ , atol=5e-3)
@unittest.skipIf(torch_device == "mps" , "Training is not supported in mps")
def lowercase_ ( self) -> Optional[Any]:
"""simple docstring"""
a_ , a_ =self.prepare_init_args_and_inputs_for_common()
a_ =self.block_class(**lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.train()
a_ =model(**lowerCAmelCase_)
if isinstance(lowerCAmelCase_ , lowerCAmelCase_):
a_ =output[0]
a_ =torch.device(lowerCAmelCase_)
a_ =randn_tensor(output.shape , device=lowerCAmelCase_)
a_ =torch.nn.functional.mse_loss(lowerCAmelCase_ , lowerCAmelCase_)
loss.backward()
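        # Note (a sketch): this training test only checks that gradients flow
        # through the block: it regresses the output against random noise with
        # an MSE loss and calls backward() once.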
| 41
| 1
|
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCAmelCase :
'''simple docstring'''
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=1_3 , lowerCAmelCase_=3 , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.1 , lowerCAmelCase_=2_2_4 , lowerCAmelCase_=1_0_0_0 , lowerCAmelCase_=[3, 3, 6, 4] , lowerCAmelCase_=[4_8, 5_6, 1_1_2, 2_2_0] , ) -> Any:
"""simple docstring"""
a_ =parent
a_ =batch_size
a_ =num_channels
a_ =is_training
a_ =use_labels
a_ =hidden_dropout_prob
a_ =attention_probs_dropout_prob
a_ =num_labels
a_ =image_size
a_ =layer_depths
a_ =embed_dims
def lowercase_ ( self) -> Optional[int]:
"""simple docstring"""
a_ =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
a_ =None
if self.use_labels:
a_ =ids_tensor([self.batch_size] , self.num_labels)
a_ =self.get_config()
return config, pixel_values, labels
def lowercase_ ( self) -> Dict:
"""simple docstring"""
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="gelu" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=lowerCAmelCase_ , layer_scale_init_value=1e-5 , )
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) -> Tuple:
"""simple docstring"""
a_ =SwiftFormerModel(config=lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.eval()
a_ =model(lowerCAmelCase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7))
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) -> Any:
"""simple docstring"""
a_ =self.num_labels
a_ =SwiftFormerForImageClassification(lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.eval()
a_ =model(lowerCAmelCase_ , labels=lowerCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
a_ =SwiftFormerForImageClassification(lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.eval()
a_ =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
a_ =model(lowerCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def lowercase_ ( self) -> str:
"""simple docstring"""
((a_) , (a_) , (a_)) =self.prepare_config_and_inputs()
a_ ={"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase ( __a , __a , unittest.TestCase):
'''simple docstring'''
__magic_name__ : Any = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
__magic_name__ : List[Any] = (
{"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
__magic_name__ : Union[str, Any] = False
__magic_name__ : List[Any] = False
__magic_name__ : int = False
__magic_name__ : List[str] = False
__magic_name__ : List[Any] = False
def lowercase_ ( self) -> int:
"""simple docstring"""
a_ =SwiftFormerModelTester(self)
a_ =ConfigTester(
self , config_class=lowerCAmelCase_ , has_text_modality=lowerCAmelCase_ , hidden_size=3_7 , num_attention_heads=1_2 , num_hidden_layers=1_2 , )
def lowercase_ ( self) -> List[str]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="SwiftFormer does not use inputs_embeds")
def lowercase_ ( self) -> Optional[Any]:
"""simple docstring"""
pass
def lowercase_ ( self) -> Dict:
"""simple docstring"""
a_ , a_ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ =model_class(lowerCAmelCase_)
a_ =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase_ , nn.Linear))
def lowercase_ ( self) -> Tuple:
"""simple docstring"""
a_ , a_ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ =model_class(lowerCAmelCase_)
a_ =inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a_ =[*signature.parameters.keys()]
a_ =["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCAmelCase_)
def lowercase_ ( self) -> Any:
"""simple docstring"""
a_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_)
def lowercase_ ( self) -> Optional[Any]:
"""simple docstring"""
a_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase_)
@slow
def lowercase_ ( self) -> List[str]:
"""simple docstring"""
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ =SwiftFormerModel.from_pretrained(lowerCAmelCase_)
self.assertIsNotNone(lowerCAmelCase_)
@unittest.skip(reason="SwiftFormer does not output attentions")
def lowercase_ ( self) -> Any:
"""simple docstring"""
pass
def lowercase_ ( self) -> str:
"""simple docstring"""
def check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_):
a_ =model_class(lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.eval()
with torch.no_grad():
a_ =model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_))
a_ =outputs.hidden_states
a_ =8
self.assertEqual(len(lowerCAmelCase_) , lowerCAmelCase_) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(lowerCAmelCase_)):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
]) , )
a_ , a_ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ =True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a_ =True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
def lowercase_ ( self) -> str:
"""simple docstring"""
def _config_zero_init(lowerCAmelCase_):
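            # recursively force every *_range / *_std / initializer_factor / layer_scale config
            # field to ~0 so a properly initialized parameter lands on exactly 0.0 or 1.0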
a_ =copy.deepcopy(lowerCAmelCase_)
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(lowerCAmelCase_ , lowerCAmelCase_ , 1e-10)
if isinstance(getattr(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) , lowerCAmelCase_):
a_ =_config_zero_init(getattr(lowerCAmelCase_ , lowerCAmelCase_))
setattr(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
return configs_no_init
a_ , a_ =self.model_tester.prepare_config_and_inputs_for_common()
a_ =_config_zero_init(lowerCAmelCase_)
for model_class in self.all_model_classes:
a_ =model_class(config=lowerCAmelCase_)
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9) / 1e9).round().item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
def lowercase_ ( self) -> Optional[int]:
"""simple docstring"""
pass
def UpperCAmelCase_ ( ):
'''simple docstring'''
a_ =Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class UpperCAmelCase ( unittest.TestCase):
'''simple docstring'''
@cached_property
def lowercase_ ( self) -> Union[str, Any]:
"""simple docstring"""
return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs") if is_vision_available() else None
@slow
def lowercase_ ( self) -> str:
"""simple docstring"""
a_ =SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs").to(lowerCAmelCase_)
a_ =self.default_image_processor
a_ =prepare_img()
a_ =image_processor(images=lowerCAmelCase_ , return_tensors="pt").to(lowerCAmelCase_)
# forward pass
with torch.no_grad():
a_ =model(**lowerCAmelCase_)
# verify the logits
a_ =torch.Size((1, 1_0_0_0))
self.assertEqual(outputs.logits.shape , lowerCAmelCase_)
a_ =torch.tensor([[-2.1703e00, 2.1107e00, -2.0811e00]]).to(lowerCAmelCase_)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1e-4))
'''simple docstring'''
from __future__ import annotations
def UpperCAmelCase_ ( lowercase__ , lowercase__ ):
'''simple docstring'''
print(F"""Vertex\tShortest Distance from vertex {src}""" )
for i, d in enumerate(lowercase__ ):
print(F"""{i}\t\t{d}""" )
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
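    # run one extra relaxation pass: if any edge can still shorten a distance, a negative-weight cycle is reachable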
for j in range(lowercase__ ):
a_ , a_ , a_ =(graph[j][k] for k in ["src", "dst", "weight"])
if distance[u] != float("inf" ) and distance[u] + w < distance[v]:
return True
return False
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
a_ =[float("inf" )] * vertex_count
a_ =0.0
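    # relax every edge vertex_count - 1 times; that suffices for all shortest paths when no negative cycle exists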
for _ in range(vertex_count - 1 ):
for j in range(lowercase__ ):
a_ , a_ , a_ =(graph[j][k] for k in ["src", "dst", "weight"])
if distance[u] != float("inf" ) and distance[u] + w < distance[v]:
a_ =distance[u] + w
a_ =check_negative_cycle(lowercase__ , lowercase__ , lowercase__ )
if negative_cycle_exists:
raise Exception("Negative cycle found" )
return distance
if __name__ == "__main__":
import doctest
doctest.testmod()
lowercase = int(input('''Enter number of vertices: ''').strip())
lowercase = int(input('''Enter number of edges: ''').strip())
lowercase = [{} for _ in range(E)]
for i in range(E):
print('''Edge ''', i + 1)
lowercase , lowercase , lowercase = (
int(x)
for x in input('''Enter source, destination, weight: ''').strip().split(''' ''')
)
lowercase = {'''src''': src, '''dst''': dest, '''weight''': weight}
lowercase = int(input('''\nEnter shortest path source:''').strip())
lowercase = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, source)
'''simple docstring'''
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
lowercase = [
'''Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the'''
''' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe'''
''' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.''',
'''The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal'''
''' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s'''
''' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the'''
''' body.''',
'''Amnesty International releases its annual report on the death penalty. The report catalogs the use of'''
''' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the'''
''' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital'''
''' punishment.''',
]
lowercase = [
'''Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .'''
''' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz'''
''' had informed his Lufthansa training school of an episode of severe depression, airline says .''',
'''Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .'''
''' Israel and the United States opposed the move, which could open the door to war crimes investigations against'''
''' Israelis .''',
'''Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to'''
''' death . Organization claims that governments around the world are using the threat of terrorism to advance'''
''' executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death'''
''' sentences up by 28% .''',
]
def UpperCAmelCase_ ( ):
'''simple docstring'''
    a_ =calculate_rouge(lowercase__ , lowercase__ , bootstrap_aggregation=False , rouge_keys=["rouge2", "rougeL"] )
    assert isinstance(no_aggregation , defaultdict )
    a_ =calculate_rouge(lowercase__ , lowercase__ , bootstrap_aggregation=False , rouge_keys=["rouge2"] )
assert (
pd.DataFrame(no_aggregation["rouge2"] ).fmeasure.mean()
== pd.DataFrame(no_aggregation_just_ra["rouge2"] ).fmeasure.mean()
)
def UpperCAmelCase_ ( ):
'''simple docstring'''
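    # rougeLsum is sentence-aware, so splitting candidates and references on newlines should raise the score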
a_ ="rougeLsum"
    a_ =calculate_rouge(lowercase__ , lowercase__ , newline_sep=True , rouge_keys=[k] )[k]
    a_ =calculate_rouge(lowercase__ , lowercase__ , newline_sep=False , rouge_keys=[k] )[k]
assert score > score_no_sep
def UpperCAmelCase_ ( ):
'''simple docstring'''
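    # rouge1/rouge2/rougeL ignore sentence boundaries, so newline separation must not change these scores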
a_ =["rouge1", "rouge2", "rougeL"]
    a_ =calculate_rouge(lowercase__ , lowercase__ , newline_sep=True , rouge_keys=lowercase__ )
    a_ =calculate_rouge(lowercase__ , lowercase__ , newline_sep=False , rouge_keys=lowercase__ )
assert score_sep == score_no_sep
def UpperCAmelCase_ ( ):
'''simple docstring'''
a_ =[
"Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.",
"Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .",
]
a_ =[
"Margot Frank, died in 1945, a month earlier than previously thought.",
"Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"
" the final seconds on board Flight 9525.",
]
    assert calculate_rouge(lowercase__ , lowercase__ , newline_sep=True ) == calculate_rouge(lowercase__ , lowercase__ , newline_sep=False )
def UpperCAmelCase_ ( ):
'''simple docstring'''
a_ =[
"\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" "
]
a_ =[
" Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."
]
    a_ =calculate_rouge(lowercase__ , lowercase__ , rouge_keys=["rougeLsum"] , newline_sep=False )["rougeLsum"]
a_ =calculate_rouge(lowercase__ , lowercase__ , rouge_keys=["rougeLsum"] )["rougeLsum"]
assert new_score > prev_score
def UpperCAmelCase_ ( ):
'''simple docstring'''
a_ =Path("examples/seq2seq/test_data/wmt_en_ro" )
a_ =calculate_rouge_path(data_dir.joinpath("test.source" ) , data_dir.joinpath("test.target" ) )
    assert isinstance(metrics , dict )
a_ =calculate_rouge_path(
        data_dir.joinpath("test.source" ) , data_dir.joinpath("test.target" ) , bootstrap_aggregation=False )
    assert isinstance(metrics_default_dict , defaultdict )
'''simple docstring'''
import torch
from diffusers import StableDiffusionPipeline
lowercase = '''path-to-your-trained-model'''
lowercase = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to('''cuda''')
lowercase = '''A photo of sks dog in a bucket'''
lowercase = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save('''dog-bucket.png''')
'''simple docstring'''
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
lowercase = getLogger(__name__)
lowercase = '''cuda''' if torch.cuda.is_available() else '''cpu'''
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ = 8 , lowercase__ = DEFAULT_DEVICE , lowercase__=False , lowercase__="summarization" , lowercase__=None , **lowercase__ , ):
'''simple docstring'''
a_ =Path(lowercase__ ).open("w" , encoding="utf-8" )
a_ =str(lowercase__ )
a_ =AutoModelForSeqaSeqLM.from_pretrained(lowercase__ ).to(lowercase__ )
if fpaa:
a_ =model.half()
a_ =AutoTokenizer.from_pretrained(lowercase__ )
logger.info(F"""Inferred tokenizer type: {tokenizer.__class__}""" ) # if this is wrong, check config.model_type.
a_ =time.time()
# update config with task specific params
use_task_specific_params(lowercase__ , lowercase__ )
if prefix is None:
a_ =prefix or getattr(model.config , "prefix" , "" ) or ""
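    # generate in mini-batches, decode each batch, and stream every hypothesis straight to disk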
for examples_chunk in tqdm(list(chunks(lowercase__ , lowercase__ ) ) ):
a_ =[prefix + text for text in examples_chunk]
a_ =tokenizer(lowercase__ , return_tensors="pt" , truncation=lowercase__ , padding="longest" ).to(lowercase__ )
a_ =model.generate(
input_ids=batch.input_ids , attention_mask=batch.attention_mask , **lowercase__ , )
a_ =tokenizer.batch_decode(lowercase__ , skip_special_tokens=lowercase__ , clean_up_tokenization_spaces=lowercase__ )
for hypothesis in dec:
fout.write(hypothesis + "\n" )
fout.flush()
fout.close()
a_ =int(time.time() - start_time ) # seconds
a_ =len(lowercase__ )
return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs , 4 )}
def UpperCAmelCase_ ( ):
'''simple docstring'''
return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S" )
def UpperCAmelCase_ ( lowercase__=True ):
'''simple docstring'''
a_ =argparse.ArgumentParser()
parser.add_argument("model_name" , type=lowercase__ , help="like facebook/bart-large-cnn,t5-base, etc." )
parser.add_argument("input_path" , type=lowercase__ , help="like cnn_dm/test.source" )
parser.add_argument("save_path" , type=lowercase__ , help="where to save summaries" )
parser.add_argument("--reference_path" , type=lowercase__ , required=lowercase__ , help="like cnn_dm/test.target" )
parser.add_argument("--score_path" , type=lowercase__ , required=lowercase__ , default="metrics.json" , help="where to save metrics" )
parser.add_argument("--device" , type=lowercase__ , required=lowercase__ , default=lowercase__ , help="cuda, cuda:1, cpu etc." )
parser.add_argument(
"--prefix" , type=lowercase__ , required=lowercase__ , default=lowercase__ , help="will be added to the begininng of src examples" )
parser.add_argument("--task" , type=lowercase__ , default="summarization" , help="used for task_specific_params + metrics" )
parser.add_argument("--bs" , type=lowercase__ , default=8 , required=lowercase__ , help="batch size" )
parser.add_argument(
"--n_obs" , type=lowercase__ , default=-1 , required=lowercase__ , help="How many observations. Defaults to all." )
parser.add_argument("--fp16" , action="store_true" )
parser.add_argument("--dump-args" , action="store_true" , help="print the custom hparams with the results" )
parser.add_argument(
"--info" , nargs="?" , type=lowercase__ , const=datetime_now() , help=(
"use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
" lang=en-ru. If no value is passed, the current datetime string will be used."
) , )
# Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
a_ , a_ =parser.parse_known_args()
a_ =parse_numeric_n_bool_cl_kwargs(lowercase__ )
if parsed_args and verbose:
print(F"""parsed the following generate kwargs: {parsed_args}""" )
a_ =[" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()]
if args.n_obs > 0:
a_ =examples[: args.n_obs]
Path(args.save_path ).parent.mkdir(exist_ok=lowercase__ )
if args.reference_path is None and Path(args.score_path ).exists():
warnings.warn(F"""score_path {args.score_path} will be overwritten unless you type ctrl-c.""" )
if args.device == "cpu" and args.fpaa:
# this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
raise ValueError("Can't mix --fp16 and --device cpu" )
a_ =generate_summaries_or_translations(
lowercase__ , args.save_path , args.model_name , batch_size=args.bs , device=args.device , fpaa=args.fpaa , task=args.task , prefix=args.prefix , **lowercase__ , )
if args.reference_path is None:
return {}
# Compute scores
a_ =calculate_bleu if "translation" in args.task else calculate_rouge
a_ =[x.rstrip() for x in open(args.save_path ).readlines()]
a_ =[x.rstrip() for x in open(args.reference_path ).readlines()][: len(lowercase__ )]
a_ =score_fn(lowercase__ , lowercase__ )
scores.update(lowercase__ )
if args.dump_args:
scores.update(lowercase__ )
if args.info:
a_ =args.info
if verbose:
print(lowercase__ )
if args.score_path is not None:
json.dump(lowercase__ , open(args.score_path , "w" ) )
return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowercase = {
'''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = ['''VisionEncoderDecoderModel''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = ['''TFVisionEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = ['''FlaxVisionEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
'''simple docstring'''
import operator as op
lowercase = '''scaler.pt'''
lowercase = '''pytorch_model'''
lowercase = '''random_states'''
lowercase = '''optimizer'''
lowercase = '''scheduler'''
lowercase = '''pytorch_model.bin'''
lowercase = '''pytorch_model.bin.index.json'''
lowercase = '''model.safetensors'''
lowercase = '''model.safetensors.index.json'''
lowercase = '''1.10.2'''
lowercase = '''py38'''
lowercase = '''4.17.0'''
lowercase = ['''ml.p3.16xlarge''', '''ml.p3dn.24xlarge''', '''ml.p4dn.24xlarge''']
lowercase = ['''FULL_SHARD''', '''SHARD_GRAD_OP''', '''NO_SHARD''', '''HYBRID_SHARD''', '''HYBRID_SHARD_ZERO2''']
lowercase = ['''TRANSFORMER_BASED_WRAP''', '''SIZE_BASED_WRAP''', '''NO_WRAP''']
lowercase = ['''BACKWARD_PRE''', '''BACKWARD_POST''', '''NO_PREFETCH''']
lowercase = ['''FULL_STATE_DICT''', '''LOCAL_STATE_DICT''', '''SHARDED_STATE_DICT''']
lowercase = '''2.0.1'''
lowercase = ['''pdsh''', '''standard''', '''openmpi''', '''mvapich''']
lowercase = ['''default''', '''reduce-overhead''', '''max-autotune''']
lowercase = {'''>''': op.gt, '''>=''': op.ge, '''==''': op.eq, '''!=''': op.ne, '''<=''': op.le, '''<''': op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
lowercase = [
'''nnodes''',
'''nproc_per_node''',
'''rdzv_backend''',
'''rdzv_endpoint''',
'''rdzv_id''',
'''rdzv_conf''',
'''standalone''',
'''max_restarts''',
'''monitor_interval''',
'''start_method''',
'''role''',
'''module''',
'''m''',
'''no_python''',
'''run_path''',
'''log_dir''',
'''r''',
'''redirects''',
'''t''',
'''tee''',
'''node_rank''',
'''master_addr''',
'''master_port''',
]
lowercase = ['''DEEPSPEED''', '''MULTI_GPU''', '''FSDP''', '''MEGATRON_LM''']
lowercase = ['''DEEPSPEED''', '''MULTI_XPU''', '''FSDP''']
'''simple docstring'''
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowercase = logging.get_logger(__name__)
lowercase = {'''vocab_file''': '''vocab.txt''', '''emoji_file''': '''emoji.json'''}
lowercase = {
'''vocab_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt''',
},
'''emoji_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json''',
},
}
lowercase = {
'''abeja/gpt-neox-japanese-2.7b''': 2_048,
}
def UpperCAmelCase_ ( lowercase__ , lowercase__ ):
'''simple docstring'''
with open(lowercase__ , "r" , encoding="utf-8" ) as f:
a_ =json.loads(f.read() )
a_ =collections.OrderedDict()
a_ =collections.OrderedDict()
a_ =collections.OrderedDict()
with open(lowercase__ , "r" , encoding="utf-8" ) as f:
a_ =f.readlines()
a_ =[[t.rstrip("\n" )] if (t == "," or "," not in t) else t.rstrip("\n" ).split("," ) for t in token]
for idx, b in enumerate(lowercase__ ):
a_ =b
a_ =idx
for wd in b:
a_ =idx
return vocab, raw_vocab, ids_to_tokens, emoji
class UpperCAmelCase ( __a):
'''simple docstring'''
__magic_name__ : Optional[int] = VOCAB_FILES_NAMES
__magic_name__ : str = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ : str = ["input_ids", "attention_mask"]
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_="<|endoftext|>" , lowerCAmelCase_="<|endoftext|>" , lowerCAmelCase_="<|startoftext|>" , lowerCAmelCase_="<|endoftext|>" , lowerCAmelCase_=False , **lowerCAmelCase_ , ) -> List[Any]:
"""simple docstring"""
super().__init__(
unk_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , do_clean_text=lowerCAmelCase_ , **lowerCAmelCase_ , )
if not os.path.isfile(lowerCAmelCase_):
raise ValueError(
f"""Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"""
" model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
if not os.path.isfile(lowerCAmelCase_):
raise ValueError(
f"""Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"""
" pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
a_ =do_clean_text
a_ , a_ , a_ , a_ =load_vocab_and_emoji(lowerCAmelCase_ , lowerCAmelCase_)
a_ =SubWordJapaneseTokenizer(
vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji)
@property
def lowercase_ ( self) -> int:
"""simple docstring"""
return len(self.raw_vocab)
def lowercase_ ( self) -> Optional[Any]:
"""simple docstring"""
return dict(self.raw_vocab , **self.added_tokens_encoder)
def lowercase_ ( self , lowerCAmelCase_) -> List[str]:
"""simple docstring"""
return self.subword_tokenizer.tokenize(lowerCAmelCase_ , clean=self.do_clean_text)
def lowercase_ ( self , lowerCAmelCase_) -> Optional[int]:
"""simple docstring"""
return self.vocab.get(lowerCAmelCase_ , self.vocab.get(self.unk_token))
def lowercase_ ( self , lowerCAmelCase_) -> List[str]:
"""simple docstring"""
return self.subword_tokenizer.convert_id_to_token(lowerCAmelCase_)
def lowercase_ ( self , lowerCAmelCase_) -> Optional[Any]:
"""simple docstring"""
a_ ="".join(lowerCAmelCase_).strip()
return out_string
def lowercase_ ( self , lowerCAmelCase_) -> List[int]:
"""simple docstring"""
a_ =[]
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_) + [self.eos_token_id])
if len(lowerCAmelCase_) > self.model_max_length:
a_ =input_ids[-self.model_max_length :]
return input_ids
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ = None) -> Tuple[str]:
"""simple docstring"""
a_ =0
if os.path.isdir(lowerCAmelCase_):
a_ =os.path.join(
lowerCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
a_ =os.path.join(
lowerCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"])
else:
a_ =(
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
)
a_ =(
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
)
with open(lowerCAmelCase_ , "w" , encoding="utf-8") as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
" Please check that the vocabulary is not corrupted!")
a_ =token_index
writer.write(",".join(lowerCAmelCase_) + "\n")
index += 1
with open(lowerCAmelCase_ , "w" , encoding="utf-8") as writer:
json.dump(self.emoji , lowerCAmelCase_)
return vocab_file, emoji_file
class UpperCAmelCase ( __a):
'''simple docstring'''
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) -> str:
"""simple docstring"""
a_ =vocab # same as swe
a_ =ids_to_tokens # same as bpe
a_ =emoji
a_ =np.max([len(lowerCAmelCase_) for w in self.vocab.keys()])
a_ =re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
a_ =re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
a_ =re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
a_ =re.compile(
r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*")
a_ =re.compile(
r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*")
a_ =re.compile(
r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*")
a_ ="─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
a_ ="▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
a_ =str.maketrans({k: "<BLOCK>" for k in keisen + blocks})
def __len__( self) -> Tuple:
"""simple docstring"""
return len(self.ids_to_tokens)
def lowercase_ ( self , lowerCAmelCase_) -> Any:
"""simple docstring"""
a_ =self.content_repattera.sub("<URL>" , lowerCAmelCase_)
a_ =self.content_repattera.sub("<EMAIL>" , lowerCAmelCase_)
a_ =self.content_repattera.sub("<TEL>" , lowerCAmelCase_)
a_ =self.content_repattera.sub("<DATE>" , lowerCAmelCase_)
a_ =self.content_repattera.sub("<DATE>" , lowerCAmelCase_)
a_ =self.content_repattera.sub("<PRICE>" , lowerCAmelCase_)
a_ =content.translate(self.content_transa)
while "<BLOCK><BLOCK>" in content:
a_ =content.replace("<BLOCK><BLOCK>" , "<BLOCK>")
return content
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_=False) -> Union[str, Any]:
"""simple docstring"""
a_ =text.replace(" " , "<SP>")
a_ =text.replace(" " , "<SP>")
a_ =text.replace("\r\n" , "<BR>")
a_ =text.replace("\n" , "<BR>")
a_ =text.replace("\r" , "<BR>")
a_ =text.replace("\t" , "<TAB>")
a_ =text.replace("—" , "ー")
a_ =text.replace("−" , "ー")
for k, v in self.emoji["emoji"].items():
if k in text:
a_ =text.replace(lowerCAmelCase_ , lowerCAmelCase_)
if clean:
a_ =self.clean_text(lowerCAmelCase_)
def check_simbol(lowerCAmelCase_):
a_ =x.encode()
            if len(x) == 1 and len(e) == 2:
a_ =(int(e[0]) << 8) + int(e[1])
if (
(c >= 0xc2a1 and c <= 0xc2bf)
or (c >= 0xc780 and c <= 0xc783)
or (c >= 0xcab9 and c <= 0xcbbf)
or (c >= 0xcc80 and c <= 0xcda2)
):
return True
return False
def checkuae(lowerCAmelCase_):
a_ =x.encode()
            if len(x) == 1 and len(e) == 3:
a_ =(int(e[0]) << 1_6) + (int(e[1]) << 8) + int(e[2])
if c >= 0xe2_8080 and c <= 0xe2_b07f:
return True
return False
a_ =0
a_ =[]
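        # greedy scan: collect every vocabulary entry that starts at the current position within
        # the lookahead window and, when several candidates match, adopt the smallest token id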
while pos < len(lowerCAmelCase_):
a_ =min(len(lowerCAmelCase_) , pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
a_ =[] # (token_id, token, pos)
for e in range(lowerCAmelCase_ , lowerCAmelCase_ , -1):
a_ =text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(lowerCAmelCase_) > 2:
a_ =[(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e))
if len(lowerCAmelCase_) > 0:
# the smallest token_id is adopted
a_ , a_ , a_ =sorted(lowerCAmelCase_ , key=lambda lowerCAmelCase_: x[0])[0]
result.append(lowerCAmelCase_)
a_ =e
else:
a_ =pos + 1
a_ =text[pos:end]
if check_simbol(lowerCAmelCase_):
result.append("<KIGOU>")
elif checkuae(lowerCAmelCase_):
result.append("<U2000U2BFF>")
else:
for i in wd.encode("utf-8"):
result.append("<|byte%d|>" % i)
a_ =end
return result
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_="\n") -> List[Any]:
"""simple docstring"""
a_ =[]
a_ =[]
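        # consecutive <|byteN|> tokens are buffered and decoded together as one UTF-8 sequence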
a_ =self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2]))
else:
if len(lowerCAmelCase_) > 0:
words.append(bytearray(lowerCAmelCase_).decode("utf-8" , errors="replace"))
a_ =[]
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji["emoji_inv"][word])
elif word == "<SP>":
words.append(" ")
elif word == "<BR>":
words.append(lowerCAmelCase_)
elif word == "<TAB>":
words.append("\t")
elif word == "<BLOCK>":
words.append("▀")
elif word == "<KIGOU>":
words.append("ǀ")
elif word == "<U2000U2BFF>":
words.append("‖")
else:
words.append(lowerCAmelCase_)
if len(lowerCAmelCase_) > 0:
words.append(bytearray(lowerCAmelCase_).decode("utf-8" , errors="replace"))
a_ ="".join(lowerCAmelCase_)
return text
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class UpperCAmelCase ( __a , unittest.TestCase):
'''simple docstring'''
__magic_name__ : Optional[int] = TextToVideoSDPipeline
__magic_name__ : int = TEXT_TO_IMAGE_PARAMS
__magic_name__ : Optional[Any] = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
__magic_name__ : List[Any] = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"return_dict",
"callback",
"callback_steps",
])
def lowercase_ ( self) -> int:
"""simple docstring"""
torch.manual_seed(0)
a_ =UNetaDConditionModel(
block_out_channels=(3_2, 6_4, 6_4, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D") , up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D") , cross_attention_dim=3_2 , attention_head_dim=4 , )
a_ =DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="scaled_linear" , clip_sample=lowerCAmelCase_ , set_alpha_to_one=lowerCAmelCase_ , )
torch.manual_seed(0)
a_ =AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0)
a_ =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act="gelu" , projection_dim=5_1_2 , )
a_ =CLIPTextModel(lowerCAmelCase_)
a_ =CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
a_ ={
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
}
return components
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_=0) -> Union[str, Any]:
"""simple docstring"""
if str(lowerCAmelCase_).startswith("mps"):
a_ =torch.manual_seed(lowerCAmelCase_)
else:
a_ =torch.Generator(device=lowerCAmelCase_).manual_seed(lowerCAmelCase_)
a_ ={
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "pt",
}
return inputs
def lowercase_ ( self) -> Tuple:
"""simple docstring"""
a_ ="cpu" # ensure determinism for the device-dependent torch.Generator
a_ =self.get_dummy_components()
a_ =TextToVideoSDPipeline(**lowerCAmelCase_)
a_ =sd_pipe.to(lowerCAmelCase_)
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_)
a_ =self.get_dummy_inputs(lowerCAmelCase_)
a_ ="np"
a_ =sd_pipe(**lowerCAmelCase_).frames
a_ =frames[0][-3:, -3:, -1]
assert frames[0].shape == (6_4, 6_4, 3)
a_ =np.array([1_5_8.0, 1_6_0.0, 1_5_3.0, 1_2_5.0, 1_0_0.0, 1_2_1.0, 1_1_1.0, 9_3.0, 1_1_3.0])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def lowercase_ ( self) -> int:
"""simple docstring"""
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=lowerCAmelCase_ , expected_max_diff=3e-3)
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def lowercase_ ( self) -> Union[str, Any]:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=lowerCAmelCase_ , expected_max_diff=1e-2)
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
def lowercase_ ( self) -> Any:
"""simple docstring"""
pass
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
def lowercase_ ( self) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
def lowercase_ ( self) -> str:
"""simple docstring"""
pass
def lowercase_ ( self) -> Tuple:
"""simple docstring"""
return super().test_progress_bar()
@slow
@skip_mps
class UpperCAmelCase ( unittest.TestCase):
'''simple docstring'''
def lowercase_ ( self) -> Any:
"""simple docstring"""
a_ =load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy")
a_ =TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
a_ =DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
a_ =pipe.to("cuda")
a_ ="Spiderman is surfing"
a_ =torch.Generator(device="cpu").manual_seed(0)
a_ =pipe(lowerCAmelCase_ , generator=lowerCAmelCase_ , num_inference_steps=2_5 , output_type="pt").frames
a_ =video_frames.cpu().numpy()
assert np.abs(expected_video - video).mean() < 5e-2
def lowercase_ ( self) -> str:
"""simple docstring"""
a_ =load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy")
a_ =TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
a_ =pipe.to("cuda")
a_ ="Spiderman is surfing"
a_ =torch.Generator(device="cpu").manual_seed(0)
a_ =pipe(lowerCAmelCase_ , generator=lowerCAmelCase_ , num_inference_steps=2 , output_type="pt").frames
a_ =video_frames.cpu().numpy()
assert np.abs(expected_video - video).mean() < 5e-2
'''simple docstring'''
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowercase = logging.get_logger(__name__)
lowercase = {
'''b0''': efficientnet.EfficientNetBa,
'''b1''': efficientnet.EfficientNetBa,
'''b2''': efficientnet.EfficientNetBa,
'''b3''': efficientnet.EfficientNetBa,
'''b4''': efficientnet.EfficientNetBa,
'''b5''': efficientnet.EfficientNetBa,
'''b6''': efficientnet.EfficientNetBa,
'''b7''': efficientnet.EfficientNetBa,
}
lowercase = {
'''b0''': {
'''hidden_dim''': 1_280,
'''width_coef''': 1.0,
'''depth_coef''': 1.0,
'''image_size''': 224,
'''dropout_rate''': 0.2,
'''dw_padding''': [],
},
'''b1''': {
'''hidden_dim''': 1_280,
'''width_coef''': 1.0,
'''depth_coef''': 1.1,
'''image_size''': 240,
'''dropout_rate''': 0.2,
'''dw_padding''': [16],
},
'''b2''': {
'''hidden_dim''': 1_408,
'''width_coef''': 1.1,
'''depth_coef''': 1.2,
'''image_size''': 260,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 8, 16],
},
'''b3''': {
'''hidden_dim''': 1_536,
'''width_coef''': 1.2,
'''depth_coef''': 1.4,
'''image_size''': 300,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 18],
},
'''b4''': {
'''hidden_dim''': 1_792,
'''width_coef''': 1.4,
'''depth_coef''': 1.8,
'''image_size''': 380,
'''dropout_rate''': 0.4,
'''dw_padding''': [6],
},
'''b5''': {
'''hidden_dim''': 2_048,
'''width_coef''': 1.6,
'''depth_coef''': 2.2,
'''image_size''': 456,
'''dropout_rate''': 0.4,
'''dw_padding''': [13, 27],
},
'''b6''': {
'''hidden_dim''': 2_304,
'''width_coef''': 1.8,
'''depth_coef''': 2.6,
'''image_size''': 528,
'''dropout_rate''': 0.5,
'''dw_padding''': [31],
},
'''b7''': {
'''hidden_dim''': 2_560,
'''width_coef''': 2.0,
'''depth_coef''': 3.1,
'''image_size''': 600,
'''dropout_rate''': 0.5,
'''dw_padding''': [18],
},
}
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
a_ =EfficientNetConfig()
a_ =CONFIG_MAP[model_name]["hidden_dim"]
a_ =CONFIG_MAP[model_name]["width_coef"]
a_ =CONFIG_MAP[model_name]["depth_coef"]
a_ =CONFIG_MAP[model_name]["image_size"]
a_ =CONFIG_MAP[model_name]["dropout_rate"]
a_ =CONFIG_MAP[model_name]["dw_padding"]
a_ ="huggingface/label-files"
a_ ="imagenet-1k-id2label.json"
a_ =1_0_0_0
a_ =json.load(open(hf_hub_download(lowercase__ , lowercase__ , repo_type="dataset" ) , "r" ) )
a_ ={int(lowercase__ ): v for k, v in idalabel.items()}
a_ =idalabel
a_ ={v: k for k, v in idalabel.items()}
return config
def UpperCAmelCase_ ( ):
'''simple docstring'''
a_ ="http://images.cocodataset.org/val2017/000000039769.jpg"
a_ =Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw )
return im
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
a_ =CONFIG_MAP[model_name]["image_size"]
a_ =EfficientNetImageProcessor(
size={"height": size, "width": size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.47853944, 0.4732864, 0.47434163] , do_center_crop=lowercase__ , )
return preprocessor
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
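    # build the TF -> HF parameter-name mapping: stem, then each block's expansion /
    # depthwise / squeeze-excite / projection sub-modules, then the top conv + batchnorm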
a_ =[v.split("_" )[0].split("block" )[1] for v in original_param_names if v.startswith("block" )]
a_ =sorted(set(lowercase__ ) )
a_ =len(lowercase__ )
a_ ={b: str(lowercase__ ) for b, i in zip(lowercase__ , range(lowercase__ ) )}
a_ =[]
rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight") )
rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") )
rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") )
rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") )
rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") )
for b in block_names:
a_ =block_name_mapping[b]
rename_keys.append((F"""block{b}_expand_conv/kernel:0""", F"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") )
rename_keys.append((F"""block{b}_expand_bn/gamma:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") )
rename_keys.append((F"""block{b}_expand_bn/beta:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") )
rename_keys.append(
(F"""block{b}_dwconv/depthwise_kernel:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") )
rename_keys.append((F"""block{b}_bn/gamma:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") )
rename_keys.append((F"""block{b}_bn/beta:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") )
rename_keys.append(
(F"""block{b}_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") )
rename_keys.append(
(F"""block{b}_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") )
rename_keys.append((F"""block{b}_se_reduce/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") )
rename_keys.append((F"""block{b}_se_reduce/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") )
rename_keys.append((F"""block{b}_se_expand/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") )
rename_keys.append((F"""block{b}_se_expand/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") )
rename_keys.append(
(F"""block{b}_project_conv/kernel:0""", F"""encoder.blocks.{hf_b}.projection.project_conv.weight""") )
rename_keys.append((F"""block{b}_project_bn/gamma:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.weight""") )
rename_keys.append((F"""block{b}_project_bn/beta:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.bias""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") )
rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") )
rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") )
rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") )
rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") )
rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") )
a_ ={}
for item in rename_keys:
if item[0] in original_param_names:
a_ ="efficientnet." + item[1]
a_ ="classifier.weight"
a_ ="classifier.bias"
return key_mapping
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
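    # TF stores conv kernels as HWIO, so permute to PyTorch's OIHW (depthwise kernels
    # need a different permutation) and transpose dense-layer kernels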
for key, value in tf_params.items():
if "normalization" in key:
continue
a_ =key_mapping[key]
if "_conv" in key and "kernel" in key:
a_ =torch.from_numpy(lowercase__ ).permute(3 , 2 , 0 , 1 )
elif "depthwise_kernel" in key:
a_ =torch.from_numpy(lowercase__ ).permute(2 , 3 , 0 , 1 )
elif "kernel" in key:
a_ =torch.from_numpy(np.transpose(lowercase__ ) )
else:
a_ =torch.from_numpy(lowercase__ )
# Replace HF parameters with original TF model parameters
assert hf_params[hf_key].shape == new_hf_value.shape
hf_params[hf_key].copy_(lowercase__ )
@torch.no_grad()
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
a_ =model_classes[model_name](
include_top=lowercase__ , weights="imagenet" , input_tensor=lowercase__ , input_shape=lowercase__ , pooling=lowercase__ , classes=1_0_0_0 , classifier_activation="softmax" , )
a_ =original_model.trainable_variables
a_ =original_model.non_trainable_variables
a_ ={param.name: param.numpy() for param in tf_params}
for param in tf_non_train_params:
a_ =param.numpy()
a_ =list(tf_params.keys() )
# Load HuggingFace model
a_ =get_efficientnet_config(lowercase__ )
a_ =EfficientNetForImageClassification(lowercase__ ).eval()
a_ =hf_model.state_dict()
# Create src-to-dst parameter name mapping dictionary
print("Converting parameters..." )
a_ =rename_keys(lowercase__ )
replace_params(lowercase__ , lowercase__ , lowercase__ )
# Initialize preprocessor and preprocess input image
a_ =convert_image_processor(lowercase__ )
a_ =preprocessor(images=prepare_img() , return_tensors="pt" )
# HF model inference
hf_model.eval()
with torch.no_grad():
a_ =hf_model(**lowercase__ )
a_ =outputs.logits.detach().numpy()
# Original model inference
a_ =False
a_ =CONFIG_MAP[model_name]["image_size"]
a_ =prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
a_ =image.img_to_array(lowercase__ )
a_ =np.expand_dims(lowercase__ , axis=0 )
a_ =original_model.predict(lowercase__ )
# Check whether original and HF model outputs match -> np.allclose
assert np.allclose(lowercase__ , lowercase__ , atol=1E-3 ), "The predicted logits are not the same."
print("Model outputs match!" )
if save_model:
# Create folder to save model
if not os.path.isdir(lowercase__ ):
os.mkdir(lowercase__ )
# Save converted model and image processor
hf_model.save_pretrained(lowercase__ )
preprocessor.save_pretrained(lowercase__ )
if push_to_hub:
# Push model and image processor to hub
print(F"""Pushing converted {model_name} to the hub...""" )
a_ =F"""efficientnet-{model_name}"""
preprocessor.push_to_hub(lowercase__ )
hf_model.push_to_hub(lowercase__ )
if __name__ == "__main__":
lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''b0''',
type=str,
help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''hf_model''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''')
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
lowercase = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
'''simple docstring'''
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class UpperCAmelCase ( __a , unittest.TestCase):
'''simple docstring'''
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class UpperCAmelCase ( unittest.TestCase):
'''simple docstring'''
@property
def lowercase_ ( self) -> List[Any]:
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def lowercase_ ( self) -> Any:
"""simple docstring"""
a_ =ort.SessionOptions()
a_ =False
return options
def lowercase_ ( self) -> List[str]:
"""simple docstring"""
a_ =load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png")
a_ =load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png")
a_ =OnnxStableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting" , revision="onnx" , safety_checker=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowerCAmelCase_)
a_ ="A red cat sitting on a park bench"
a_ =np.random.RandomState(0)
a_ =pipe(
prompt=lowerCAmelCase_ , image=lowerCAmelCase_ , mask_image=lowerCAmelCase_ , guidance_scale=7.5 , num_inference_steps=1_0 , generator=lowerCAmelCase_ , output_type="np" , )
a_ =output.images
a_ =images[0, 2_5_5:2_5_8, 2_5_5:2_5_8, -1]
assert images.shape == (1, 5_1_2, 5_1_2, 3)
a_ =np.array([0.2_5_1_4, 0.3_0_0_7, 0.3_5_1_7, 0.1_7_9_0, 0.2_3_8_2, 0.3_1_6_7, 0.1_9_4_4, 0.2_2_7_3, 0.2_4_6_4])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def lowercase_ ( self) -> Optional[Any]:
"""simple docstring"""
a_ =load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png")
a_ =load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png")
a_ =LMSDiscreteScheduler.from_pretrained(
"runwayml/stable-diffusion-inpainting" , subfolder="scheduler" , revision="onnx")
a_ =OnnxStableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting" , revision="onnx" , scheduler=lowerCAmelCase_ , safety_checker=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowerCAmelCase_)
a_ ="A red cat sitting on a park bench"
a_ =np.random.RandomState(0)
a_ =pipe(
prompt=lowerCAmelCase_ , image=lowerCAmelCase_ , mask_image=lowerCAmelCase_ , guidance_scale=7.5 , num_inference_steps=2_0 , generator=lowerCAmelCase_ , output_type="np" , )
a_ =output.images
a_ =images[0, 2_5_5:2_5_8, 2_5_5:2_5_8, -1]
assert images.shape == (1, 5_1_2, 5_1_2, 3)
a_ =np.array([0.0_0_8_6, 0.0_0_7_7, 0.0_0_8_3, 0.0_0_9_3, 0.0_1_0_7, 0.0_1_3_9, 0.0_0_9_4, 0.0_0_9_7, 0.0_1_2_5])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
'''simple docstring'''
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
'''simple docstring'''
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class UpperCAmelCase ( unittest.TestCase):
'''simple docstring'''
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=1_3 , lowerCAmelCase_=7 , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=9_9 , lowerCAmelCase_=3_2 , lowerCAmelCase_=5 , lowerCAmelCase_=4 , lowerCAmelCase_=3_7 , lowerCAmelCase_="gelu" , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.1 , lowerCAmelCase_=5_1_2 , lowerCAmelCase_=1_6 , lowerCAmelCase_=2 , lowerCAmelCase_=0.0_2 , lowerCAmelCase_=4 , ) -> Union[str, Any]:
"""simple docstring"""
a_ =parent
a_ =batch_size
a_ =seq_length
a_ =is_training
a_ =use_attention_mask
a_ =use_token_type_ids
a_ =use_labels
a_ =vocab_size
a_ =hidden_size
a_ =num_hidden_layers
a_ =num_attention_heads
a_ =intermediate_size
a_ =hidden_act
a_ =hidden_dropout_prob
a_ =attention_probs_dropout_prob
a_ =max_position_embeddings
a_ =type_vocab_size
a_ =type_sequence_label_size
a_ =initializer_range
a_ =num_choices
def lowercase_ ( self) -> Union[str, Any]:
"""simple docstring"""
a_ =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a_ =None
if self.use_attention_mask:
a_ =random_attention_mask([self.batch_size, self.seq_length])
a_ =None
if self.use_token_type_ids:
a_ =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
a_ =BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase_ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowercase_ ( self) -> Dict:
"""simple docstring"""
a_ =self.prepare_config_and_inputs()
a_ , a_ , a_ , a_ =config_and_inputs
a_ ={"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
def lowercase_ ( self) -> int:
"""simple docstring"""
a_ =self.prepare_config_and_inputs()
a_ , a_ , a_ , a_ =config_and_inputs
a_ =True
a_ =floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
a_ =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2)
return (
config,
input_ids,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class UpperCAmelCase ( __a , unittest.TestCase):
'''simple docstring'''
__magic_name__ : str = True
__magic_name__ : str = (
(
FlaxBertModel,
FlaxBertForPreTraining,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForQuestionAnswering,
FlaxBertForNextSentencePrediction,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowercase_ ( self) -> int:
"""simple docstring"""
a_ =FlaxBertModelTester(self)
@slow
def lowercase_ ( self) -> Tuple:
"""simple docstring"""
a_ =FlaxBertModel.from_pretrained("bert-base-cased")
a_ =model(np.ones((1, 1)))
self.assertIsNotNone(lowerCAmelCase_)
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase = {
'''configuration_timesformer''': ['''TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimesformerConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
'''TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimesformerModel''',
'''TimesformerForVideoClassification''',
'''TimesformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
'''simple docstring'''
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    '''simple docstring'''
    chkpt = torch.load(xlm_checkpoint_path, map_location="cpu")
    state_dict = chkpt["model"]
    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v
    config = chkpt["params"]
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}
    vocab = chkpt["dico_word2id"]
    vocab = {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()}
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]
    print(F"""Save PyTorch model to {pytorch_weights_dump_path}""")
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)
    print(F"""Save configuration file to {pytorch_config_dump_path}""")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(config, indent=2) + "\n")
    print(F"""Save vocab file to {pytorch_vocab_dump_path}""")
    with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(vocab, indent=2) + "\n")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--xlm_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
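# Invocation sketch using the two required flags defined above (both paths
# are placeholders):
#     python convert_xlm_checkpoint_to_pytorch.py \
#         --xlm_checkpoint_path /path/to/xlm_checkpoint.pth \
#         --pytorch_dump_folder_path /path/to/output_dir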
'''simple docstring'''
from collections.abc import Generator
def fibonacci_generator():
    '''simple docstring'''
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b
def solution(n=1000):
    '''simple docstring'''
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
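# Sanity check: the first Fibonacci term with three digits is the twelfth
# (144), so:
#     >>> solution(3)
#     12
# Project Euler 25 asks for the default n = 1000.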
'''simple docstring'''
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
lowercase = None
try:
import msvcrt
except ImportError:
lowercase = None
try:
import fcntl
except ImportError:
lowercase = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
    TimeoutError = OSError
# Data
# ------------------------------------------------
__all__ = [
    '''Timeout''',
    '''BaseFileLock''',
    '''WindowsFileLock''',
    '''UnixFileLock''',
    '''SoftFileLock''',
    '''FileLock''',
]
__version__ = '''3.0.12'''
_logger = None
def logger():
    '''simple docstring'''
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger
class Timeout(TimeoutError):
    '''simple docstring'''
    def __init__(self, lock_file):
        """simple docstring"""
        self.lock_file = lock_file
        return None
    def __str__(self):
        """simple docstring"""
        temp = F"""The file lock '{self.lock_file}' could not be acquired."""
        return temp
class _Acquire_ReturnProxy:
    '''simple docstring'''
    def __init__(self, lock):
        """simple docstring"""
        self.lock = lock
        return None
    def __enter__(self):
        """simple docstring"""
        return self.lock
    def __exit__(self, exc_type, exc_value, traceback):
        """simple docstring"""
        self.lock.release()
        return None
class BaseFileLock:
'''simple docstring'''
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=-1 , lowerCAmelCase_=None) -> Union[str, Any]:
"""simple docstring"""
a_ =max_filename_length if max_filename_length is not None else 2_5_5
# Hash the filename if it's too long
a_ =self.hash_filename_if_too_long(lowerCAmelCase_ , lowerCAmelCase_)
# The path to the lock file.
a_ =lock_file
# The file descriptor for the *_lock_file* as it is returned by the
# os.open() function.
# This file lock is only NOT None, if the object currently holds the
# lock.
a_ =None
# The default timeout value.
a_ =timeout
# We use this lock primarily for the lock counter.
a_ =threading.Lock()
# The lock counter is used for implementing the nested locking
# mechanism. Whenever the lock is acquired, the counter is increased and
# the lock is only released, when this value is 0 again.
a_ =0
return None
@property
def lowercase_ ( self) -> Any:
"""simple docstring"""
return self._lock_file
@property
def lowercase_ ( self) -> List[str]:
"""simple docstring"""
return self._timeout
@timeout.setter
def lowercase_ ( self , lowerCAmelCase_) -> Optional[Any]:
"""simple docstring"""
a_ =float(lowerCAmelCase_)
return None
def lowercase_ ( self) -> int:
"""simple docstring"""
raise NotImplementedError()
def lowercase_ ( self) -> int:
"""simple docstring"""
raise NotImplementedError()
@property
def lowercase_ ( self) -> Dict:
"""simple docstring"""
return self._lock_file_fd is not None
def lowercase_ ( self , lowerCAmelCase_=None , lowerCAmelCase_=0.0_5) -> Union[str, Any]:
"""simple docstring"""
if timeout is None:
a_ =self.timeout
# Increment the number right at the beginning.
# We can still undo it, if something fails.
with self._thread_lock:
self._lock_counter += 1
a_ =id(self)
a_ =self._lock_file
a_ =time.time()
try:
while True:
with self._thread_lock:
if not self.is_locked:
logger().debug(f"""Attempting to acquire lock {lock_id} on {lock_filename}""")
self._acquire()
if self.is_locked:
logger().debug(f"""Lock {lock_id} acquired on {lock_filename}""")
break
elif timeout >= 0 and time.time() - start_time > timeout:
logger().debug(f"""Timeout on acquiring lock {lock_id} on {lock_filename}""")
raise Timeout(self._lock_file)
else:
logger().debug(
f"""Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...""")
time.sleep(lowerCAmelCase_)
except: # noqa
# Something did go wrong, so decrement the counter.
with self._thread_lock:
a_ =max(0 , self._lock_counter - 1)
raise
return _Acquire_ReturnProxy(lock=self)
def lowercase_ ( self , lowerCAmelCase_=False) -> Tuple:
"""simple docstring"""
with self._thread_lock:
if self.is_locked:
self._lock_counter -= 1
if self._lock_counter == 0 or force:
a_ =id(self)
a_ =self._lock_file
logger().debug(f"""Attempting to release lock {lock_id} on {lock_filename}""")
self._release()
a_ =0
logger().debug(f"""Lock {lock_id} released on {lock_filename}""")
return None
def __enter__( self) -> Optional[int]:
"""simple docstring"""
self.acquire()
return self
def __exit__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) -> Any:
"""simple docstring"""
self.release()
return None
def __del__( self) -> List[str]:
"""simple docstring"""
self.release(force=lowerCAmelCase_)
return None
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_) -> str:
"""simple docstring"""
a_ =os.path.basename(lowerCAmelCase_)
if len(lowerCAmelCase_) > max_length and max_length > 0:
a_ =os.path.dirname(lowerCAmelCase_)
a_ =str(hash(lowerCAmelCase_))
a_ =filename[: max_length - len(lowerCAmelCase_) - 8] + "..." + hashed_filename + ".lock"
return os.path.join(lowerCAmelCase_ , lowerCAmelCase_)
else:
return path
class WindowsFileLock(BaseFileLock):
'''simple docstring'''
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=-1 , lowerCAmelCase_=None) -> Tuple:
"""simple docstring"""
from .file_utils import relative_to_absolute_path
super().__init__(lowerCAmelCase_ , timeout=lowerCAmelCase_ , max_filename_length=lowerCAmelCase_)
a_ ="\\\\?\\" + relative_to_absolute_path(self.lock_file)
def lowercase_ ( self) -> Optional[int]:
"""simple docstring"""
a_ =os.O_RDWR | os.O_CREAT | os.O_TRUNC
try:
a_ =os.open(self._lock_file , lowerCAmelCase_)
except OSError:
pass
else:
try:
msvcrt.locking(lowerCAmelCase_ , msvcrt.LK_NBLCK , 1)
except OSError:
os.close(lowerCAmelCase_)
else:
a_ =fd
return None
def lowercase_ ( self) -> List[str]:
"""simple docstring"""
a_ =self._lock_file_fd
a_ =None
msvcrt.locking(lowerCAmelCase_ , msvcrt.LK_UNLCK , 1)
os.close(lowerCAmelCase_)
try:
os.remove(self._lock_file)
# Probably another instance of the application
# that acquired the file lock.
except OSError:
pass
return None
class UnixFileLock(BaseFileLock):
'''simple docstring'''
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=-1 , lowerCAmelCase_=None) -> List[Any]:
"""simple docstring"""
a_ =os.statvfs(os.path.dirname(lowerCAmelCase_)).f_namemax
super().__init__(lowerCAmelCase_ , timeout=lowerCAmelCase_ , max_filename_length=lowerCAmelCase_)
def lowercase_ ( self) -> Optional[Any]:
"""simple docstring"""
a_ =os.O_RDWR | os.O_CREAT | os.O_TRUNC
a_ =os.open(self._lock_file , lowerCAmelCase_)
try:
fcntl.flock(lowerCAmelCase_ , fcntl.LOCK_EX | fcntl.LOCK_NB)
except OSError:
os.close(lowerCAmelCase_)
else:
a_ =fd
return None
def lowercase_ ( self) -> Any:
"""simple docstring"""
a_ =self._lock_file_fd
a_ =None
fcntl.flock(lowerCAmelCase_ , fcntl.LOCK_UN)
os.close(lowerCAmelCase_)
return None
class SoftFileLock(BaseFileLock):
'''simple docstring'''
def lowercase_ ( self) -> List[str]:
"""simple docstring"""
a_ =os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
try:
a_ =os.open(self._lock_file , lowerCAmelCase_)
except OSError:
pass
else:
a_ =fd
return None
def lowercase_ ( self) -> List[str]:
"""simple docstring"""
os.close(self._lock_file_fd)
a_ =None
try:
os.remove(self._lock_file)
# The file is already deleted and that's what we want.
except OSError:
pass
return None
FileLock = None
if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock
    if warnings is not None:
        warnings.warn('''only soft file lock is available''')
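# Usage sketch, assuming the real filelock implementation behind the classes
# above (the lock file name is a placeholder):
#     with FileLock("resource.txt.lock", timeout=10):
#         ...  # exclusive access to resource.txt while the lock is held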
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''google/switch-base-8''': '''https://huggingface.co/google/switch-base-8/blob/main/config.json''',
}
class SwitchTransformersConfig(PretrainedConfig):
'''simple docstring'''
__magic_name__ : int = "switch_transformers"
__magic_name__ : List[Any] = ["past_key_values"]
__magic_name__ : Union[str, Any] = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
def __init__( self , lowerCAmelCase_=3_2_1_2_8 , lowerCAmelCase_=7_6_8 , lowerCAmelCase_=6_4 , lowerCAmelCase_=2_0_4_8 , lowerCAmelCase_=6_4 , lowerCAmelCase_=1_2 , lowerCAmelCase_=3 , lowerCAmelCase_=1_2 , lowerCAmelCase_=3 , lowerCAmelCase_=1_2 , lowerCAmelCase_=8 , lowerCAmelCase_=False , lowerCAmelCase_=0.0_1 , lowerCAmelCase_="float32" , lowerCAmelCase_=False , lowerCAmelCase_=3_2 , lowerCAmelCase_=1_2_8 , lowerCAmelCase_=0.1 , lowerCAmelCase_=1e-6 , lowerCAmelCase_=0.0_0_1 , lowerCAmelCase_=0.0_0_1 , lowerCAmelCase_=1.0 , lowerCAmelCase_="relu" , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_=True , lowerCAmelCase_=0 , lowerCAmelCase_=1 , **lowerCAmelCase_ , ) -> Optional[int]:
"""simple docstring"""
a_ =vocab_size
a_ =d_model
a_ =d_kv
a_ =d_ff
a_ =num_sparse_encoder_layers
a_ =num_layers
a_ =(
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
a_ =num_sparse_decoder_layers
# This tells us, each how many encoder layer we'll have to set a sparse layer.
if self.num_sparse_encoder_layers > 0:
a_ =self.num_layers // self.num_sparse_encoder_layers
else:
a_ =self.num_layers # HACK: this will create 0 sparse layers
# This tells us, each how many encoder layer we'll have to set a sparse layer.
if self.num_sparse_decoder_layers > 0:
a_ =self.num_decoder_layers // self.num_sparse_decoder_layers
else:
a_ =self.num_decoder_layers # HACK: this will create 0 sparse layers
a_ =num_heads
a_ =num_experts
a_ =expert_capacity
a_ =router_bias
a_ =router_jitter_noise
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(f"""`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}""")
a_ =router_dtype
a_ =router_ignore_padding_tokens
a_ =relative_attention_num_buckets
a_ =relative_attention_max_distance
a_ =dropout_rate
a_ =layer_norm_epsilon
a_ =initializer_factor
a_ =feed_forward_proj
a_ =use_cache
a_ =add_router_probs
a_ =router_z_loss_coef
a_ =router_aux_loss_coef
a_ =self.feed_forward_proj.split("-")
a_ =act_info[-1]
a_ =act_info[0] == "gated"
if len(lowerCAmelCase_) > 1 and act_info[0] != "gated" or len(lowerCAmelCase_) > 2:
raise ValueError(
f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
"Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
"'gated-gelu' or 'relu'")
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
a_ ="gelu_new"
super().__init__(
pad_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , is_encoder_decoder=lowerCAmelCase_ , **lowerCAmelCase_ , )
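# Construction sketch with illustrative values; the defaults above mirror
# google/switch-base-8, so a toy config only overrides what it needs:
#     config = SwitchTransformersConfig(
#         d_model=256, num_layers=6, num_sparse_encoder_layers=3, num_experts=4
#     )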
'''simple docstring'''
import math
def insertion_sort(array, start=0, end=0):
    '''simple docstring'''
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array
def heapify(array, index, heap_size):  # Max Heap
    '''simple docstring'''
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node
    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index
    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index
    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)
def heap_sort(array):
    '''simple docstring'''
    n = len(array)
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)
    for i in range(n - 1, 0, -1):
        array[i], array[0] = array[0], array[i]
        heapify(array, 0, i)
    return array
def median_of_3(array, first_index, middle_index, last_index):
    '''simple docstring'''
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]
def partition(array, low, high, pivot):
    '''simple docstring'''
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1
def sort(array):
    '''simple docstring'''
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)
def intro_sort(array, start, end, size_threshold, max_depth):
    '''simple docstring'''
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('''Enter numbers separated by a comma : ''').strip()
    unsorted = [float(item) for item in user_input.split(''',''')]
print(sort(unsorted))
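# Behavior sketch: with fewer than 17 elements the driver above reduces to a
# single insertion-sort pass.
#     >>> sort([4.1, 0.2, 9.9, 3.3])
#     [0.2, 3.3, 4.1, 9.9]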
'''simple docstring'''
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_results(output_dir):
    '''simple docstring'''
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(F"""can't find {path}""")
    return results
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class UpperCAmelCase ( __a):
'''simple docstring'''
def lowercase_ ( self) -> List[Any]:
"""simple docstring"""
import xla_spawn
a_ =self.get_auto_remove_tmp_dir()
a_ =f"""
./examples/pytorch/text-classification/run_glue.py
--num_cores=8
./examples/pytorch/text-classification/run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--overwrite_output_dir
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--do_train
--do_eval
--debug tpu_metrics_debug
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--max_steps=10
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
with patch.object(lowerCAmelCase_ , "argv" , lowerCAmelCase_):
a_ =time()
xla_spawn.main()
a_ =time()
a_ =get_results(lowerCAmelCase_)
self.assertGreaterEqual(result["eval_accuracy"] , 0.7_5)
# Assert that the script takes less than 500 seconds to make sure it doesn't hang.
self.assertLess(end - start , 5_0_0)
def lowercase_ ( self) -> Tuple:
"""simple docstring"""
import xla_spawn
a_ ="\n ./tests/test_trainer_tpu.py\n --num_cores=8\n ./tests/test_trainer_tpu.py\n ".split()
with patch.object(lowerCAmelCase_ , "argv" , lowerCAmelCase_):
xla_spawn.main()
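# Rough command-line equivalent of the patched argv above, assuming the
# transformers examples layout (the output dir is a placeholder):
#     python examples/pytorch/xla_spawn.py --num_cores=8 \
#         examples/pytorch/text-classification/run_glue.py \
#         --model_name_or_path distilbert-base-uncased --output_dir /tmp/glue ...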
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    '''configuration_funnel''': ['''FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FunnelConfig'''],
    '''convert_funnel_original_tf_checkpoint_to_pytorch''': [],
    '''tokenization_funnel''': ['''FunnelTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_funnel_fast'''] = ['''FunnelTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_funnel'''] = [
'''FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FunnelBaseModel''',
'''FunnelForMaskedLM''',
'''FunnelForMultipleChoice''',
'''FunnelForPreTraining''',
'''FunnelForQuestionAnswering''',
'''FunnelForSequenceClassification''',
'''FunnelForTokenClassification''',
'''FunnelModel''',
'''FunnelPreTrainedModel''',
'''load_tf_weights_in_funnel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_funnel'''] = [
'''TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFFunnelBaseModel''',
'''TFFunnelForMaskedLM''',
'''TFFunnelForMultipleChoice''',
'''TFFunnelForPreTraining''',
'''TFFunnelForQuestionAnswering''',
'''TFFunnelForSequenceClassification''',
'''TFFunnelForTokenClassification''',
'''TFFunnelModel''',
'''TFFunnelPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''',
}
class AlbertConfig(PretrainedConfig):
'''simple docstring'''
__magic_name__ : int = "albert"
def __init__( self , lowerCAmelCase_=3_0_0_0_0 , lowerCAmelCase_=1_2_8 , lowerCAmelCase_=4_0_9_6 , lowerCAmelCase_=1_2 , lowerCAmelCase_=1 , lowerCAmelCase_=6_4 , lowerCAmelCase_=1_6_3_8_4 , lowerCAmelCase_=1 , lowerCAmelCase_="gelu_new" , lowerCAmelCase_=0 , lowerCAmelCase_=0 , lowerCAmelCase_=5_1_2 , lowerCAmelCase_=2 , lowerCAmelCase_=0.0_2 , lowerCAmelCase_=1e-12 , lowerCAmelCase_=0.1 , lowerCAmelCase_="absolute" , lowerCAmelCase_=0 , lowerCAmelCase_=2 , lowerCAmelCase_=3 , **lowerCAmelCase_ , ) -> Any:
"""simple docstring"""
super().__init__(pad_token_id=lowerCAmelCase_ , bos_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , **lowerCAmelCase_)
a_ =vocab_size
a_ =embedding_size
a_ =hidden_size
a_ =num_hidden_layers
a_ =num_hidden_groups
a_ =num_attention_heads
a_ =inner_group_num
a_ =hidden_act
a_ =intermediate_size
a_ =hidden_dropout_prob
a_ =attention_probs_dropout_prob
a_ =max_position_embeddings
a_ =type_vocab_size
a_ =initializer_range
a_ =layer_norm_eps
a_ =classifier_dropout_prob
a_ =position_embedding_type
class AlbertOnnxConfig(OnnxConfig):
'''simple docstring'''
@property
def lowercase_ ( self) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
])
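# Construction sketch with illustrative albert-base-v2-like values; ALBERT's
# small footprint comes from the reduced embedding_size plus the cross-layer
# sharing controlled by num_hidden_groups / inner_group_num above:
#     config = AlbertConfig(
#         vocab_size=30000, embedding_size=128, hidden_size=768,
#         num_hidden_layers=12, num_attention_heads=12
#     )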
'''simple docstring'''
from random import shuffle
import tensorflow as tf
from numpy import array
def TFKMeansCluster(vectors, noofclusters):
    '''simple docstring'''
    noofclusters = int(noofclusters)
    assert noofclusters < len(vectors)
# Find out the dimensionality
a_ =len(vectors[0] )
# Will help select random centroids from among the available vectors
a_ =list(range(len(lowercase__ ) ) )
shuffle(lowercase__ )
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
a_ =tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
a_ =tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
##First lets ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
a_ =[
tf.Variable(vectors[vector_indices[i]] ) for i in range(lowercase__ )
]
##These nodes will assign the centroid Variables the appropriate
##values
a_ =tf.placeholder("float64" , [dim] )
a_ =[]
for centroid in centroids:
cent_assigns.append(tf.assign(lowercase__ , lowercase__ ) )
##Variables for cluster assignments of individual vectors(initialized
##to 0 at first)
a_ =[tf.Variable(0 ) for i in range(len(lowercase__ ) )]
##These nodes will assign an assignment Variable the appropriate
##value
a_ =tf.placeholder("int32" )
a_ =[]
for assignment in assignments:
cluster_assigns.append(tf.assign(lowercase__ , lowercase__ ) )
##Now lets construct the node that will compute the mean
# The placeholder for the input
a_ =tf.placeholder("float" , [None, dim] )
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
a_ =tf.reduce_mean(lowercase__ , 0 )
##Node for computing Euclidean distances
# Placeholders for input
a_ =tf.placeholder("float" , [dim] )
a_ =tf.placeholder("float" , [dim] )
a_ =tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(lowercase__ , lowercase__ ) , 2 ) ) )
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
a_ =tf.placeholder("float" , [noofclusters] )
a_ =tf.argmin(lowercase__ , 0 )
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
a_ =tf.initialize_all_variables()
# Initialize all variables
sess.run(lowercase__ )
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
a_ =1_0_0
for _ in range(lowercase__ ):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
for vector_n in range(len(lowercase__ ) ):
a_ =vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
#'centroid_distances', since that is the input to the
# cluster assignment node.
a_ =[
sess.run(lowercase__ , feed_dict={va: vect, va: sess.run(lowercase__ )} )
for centroid in centroids
]
# Now use the cluster assignment node, with the distances
# as the input
a_ =sess.run(
lowercase__ , feed_dict={centroid_distances: distances} )
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} )
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(lowercase__ ):
# Collect all the vectors assigned to this cluster
a_ =[
vectors[i]
for i in range(len(lowercase__ ) )
if sess.run(assignments[i] ) == cluster_n
]
# Compute new centroid location
a_ =sess.run(
lowercase__ , feed_dict={mean_input: array(lowercase__ )} )
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} )
# Return centroids and assignments
a_ =sess.run(lowercase__ )
a_ =sess.run(lowercase__ )
return centroids, assignments
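# Hypothetical driver, assuming the TF1-style graph/session API used above
# (tf.Session, tf.placeholder); on TF2 this needs tensorflow.compat.v1 with
# eager execution disabled. The name TFKMeansCluster follows the conventional
# name for this routine; the data is illustrative.
#     vectors = [[1.0, 1.0], [1.5, 2.0], [8.0, 8.0], [8.5, 9.0]]
#     centroids, assignments = TFKMeansCluster(vectors, 2)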
'''simple docstring'''
from collections.abc import Sequence
def max_subsequence_sum(nums=None):
    '''simple docstring'''
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")
    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        ans = max(ans, ans + num, num)
    return ans
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
    n = int(input('''Enter number of elements : ''').strip())
    array = list(map(int, input('''\nEnter the numbers : ''').strip().split()))[:n]
print(max_subsequence_sum(array))
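# Note this computes the best *non-contiguous* subsequence, so a negative
# element is simply skipped rather than breaking a run:
#     >>> max_subsequence_sum([1, -2, 3, 4])
#     8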
'''simple docstring'''
class Matrix:  # Public class to implement a graph
    '''simple docstring'''
    def __init__(self, row, col, graph) -> None:
        """simple docstring"""
        self.ROW = row
        self.COL = col
        self.graph = graph
    def is_safe(self, i, j, visited) -> bool:
        """simple docstring"""
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )
    def diffs(self, i, j, visited) -> None:
        """simple docstring"""
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)
    def count_islands(self) -> int:  # And finally, count all islands.
        """simple docstring"""
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
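# Usage sketch; connectivity is 8-directional, so diagonal neighbors merge
# into one island:
#     >>> Matrix(3, 4, [[1, 1, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1]]).count_islands()
#     2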
'''simple docstring'''
import os
from math import log10
def solution(data_file="base_exp.txt"):
    '''simple docstring'''
    largest = 0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        a, x = list(map(int, line.split(",")))
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1
    return result
if __name__ == "__main__":
print(solution())
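# The expected data file is Project Euler 99's base_exp.txt: one
# "base,exponent" pair per line (the first line is 519432,525806), and
# solution() returns the 1-based line number whose base**exponent is largest.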
'''simple docstring'''
from ...processing_utils import ProcessorMixin
class TvltProcessor(ProcessorMixin):
'''simple docstring'''
__magic_name__ : Union[str, Any] = ["image_processor", "feature_extractor"]
__magic_name__ : List[Any] = "TvltImageProcessor"
__magic_name__ : Optional[int] = "TvltFeatureExtractor"
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_) -> List[Any]:
"""simple docstring"""
super().__init__(image_processor=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_)
a_ =image_processor
a_ =feature_extractor
def __call__( self , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=False , lowerCAmelCase_=False , *lowerCAmelCase_ , **lowerCAmelCase_ , ) -> str:
"""simple docstring"""
if images is None and audio is None:
raise ValueError("You need to specify either an `images` or `audio` input to process.")
a_ =None
if images is not None:
a_ =self.image_processor(lowerCAmelCase_ , mask_pixel=lowerCAmelCase_ , *lowerCAmelCase_ , **lowerCAmelCase_)
if images_mixed is not None:
a_ =self.image_processor(lowerCAmelCase_ , is_mixed=lowerCAmelCase_ , *lowerCAmelCase_ , **lowerCAmelCase_)
if audio is not None:
a_ =self.feature_extractor(
lowerCAmelCase_ , *lowerCAmelCase_ , sampling_rate=lowerCAmelCase_ , mask_audio=lowerCAmelCase_ , **lowerCAmelCase_)
a_ ={}
if audio is not None:
output_dict.update(lowerCAmelCase_)
if images is not None:
output_dict.update(lowerCAmelCase_)
if images_mixed_dict is not None:
output_dict.update(lowerCAmelCase_)
return output_dict
@property
def lowercase_ ( self) -> Optional[int]:
"""simple docstring"""
a_ =self.image_processor.model_input_names
a_ =self.feature_extractor.model_input_names
return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
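# Call sketch, assuming instantiated TvltImageProcessor / TvltFeatureExtractor
# objects; video_frames and audio_array are placeholders:
#     processor = TvltProcessor(image_processor, feature_extractor)
#     inputs = processor(images=video_frames, audio=audio_array, sampling_rate=44100)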
'''simple docstring'''
from __future__ import annotations
def extended_euclid(a, b):
    '''simple docstring'''
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)
def chinese_remainder_theorem(n1, r1, n2, r2):
    '''simple docstring'''
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
def invert_modulo(a, n):
    '''simple docstring'''
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b
def chinese_remainder_theorem2(n1, r1, n2, r2):
    '''simple docstring'''
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name='''chinese_remainder_theorem''', verbose=True)
testmod(name='''chinese_remainder_theorem2''', verbose=True)
testmod(name='''invert_modulo''', verbose=True)
testmod(name='''extended_euclid''', verbose=True)
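# Worked example: the smallest x with x % 5 == 1 and x % 7 == 3 is 31, and
# both constructions agree:
#     >>> chinese_remainder_theorem(5, 1, 7, 3)
#     31
#     >>> chinese_remainder_theorem2(5, 1, 7, 3)
#     31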
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
global_rng = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    '''simple docstring'''
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
@require_torch
class SpeechTaFeatureExtractionTester(unittest.TestCase):
'''simple docstring'''
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=7 , lowerCAmelCase_=4_0_0 , lowerCAmelCase_=2_0_0_0 , lowerCAmelCase_=1 , lowerCAmelCase_=0.0 , lowerCAmelCase_=1_6_0_0_0 , lowerCAmelCase_=True , lowerCAmelCase_=8_0 , lowerCAmelCase_=1_6 , lowerCAmelCase_=6_4 , lowerCAmelCase_="hann_window" , lowerCAmelCase_=8_0 , lowerCAmelCase_=7_6_0_0 , lowerCAmelCase_=1e-10 , lowerCAmelCase_=True , ) -> Any:
"""simple docstring"""
a_ =parent
a_ =batch_size
a_ =min_seq_length
a_ =max_seq_length
a_ =(self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
a_ =feature_size
a_ =padding_value
a_ =sampling_rate
a_ =do_normalize
a_ =num_mel_bins
a_ =hop_length
a_ =win_length
a_ =win_function
a_ =fmin
a_ =fmax
a_ =mel_floor
a_ =return_attention_mask
def lowercase_ ( self) -> Optional[int]:
"""simple docstring"""
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
def lowercase_ ( self , lowerCAmelCase_=False , lowerCAmelCase_=False) -> Any:
"""simple docstring"""
def _flatten(lowerCAmelCase_):
return list(itertools.chain(*lowerCAmelCase_))
if equal_length:
a_ =floats_list((self.batch_size, self.max_seq_length))
else:
# make sure that inputs increase in size
a_ =[
_flatten(floats_list((x, self.feature_size)))
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff)
]
if numpify:
a_ =[np.asarray(lowerCAmelCase_) for x in speech_inputs]
return speech_inputs
def lowercase_ ( self , lowerCAmelCase_=False , lowerCAmelCase_=False) -> Any:
"""simple docstring"""
if equal_length:
a_ =[floats_list((self.max_seq_length, self.num_mel_bins)) for _ in range(self.batch_size)]
else:
# make sure that inputs increase in size
a_ =[
floats_list((x, self.num_mel_bins))
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff)
]
if numpify:
a_ =[np.asarray(lowerCAmelCase_) for x in speech_inputs]
return speech_inputs
@require_torch
class SpeechTaFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
'''simple docstring'''
__magic_name__ : int = SpeechTaFeatureExtractor
def lowercase_ ( self) -> Tuple:
"""simple docstring"""
a_ =SpeechTaFeatureExtractionTester(self)
def lowercase_ ( self , lowerCAmelCase_) -> Union[str, Any]:
"""simple docstring"""
self.assertTrue(np.all(np.mean(lowerCAmelCase_ , axis=0) < 1e-3))
self.assertTrue(np.all(np.abs(np.var(lowerCAmelCase_ , axis=0) - 1) < 1e-3))
def lowercase_ ( self) -> Optional[Any]:
"""simple docstring"""
a_ =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
# create three inputs of length 800, 1000, and 1200
a_ =[floats_list((1, x))[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0)]
a_ =[np.asarray(lowerCAmelCase_) for speech_input in speech_inputs]
# Test not batched input
a_ =feat_extract(speech_inputs[0] , return_tensors="np").input_values
a_ =feat_extract(np_speech_inputs[0] , return_tensors="np").input_values
self.assertTrue(np.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1e-3))
# Test batched
a_ =feat_extract(lowerCAmelCase_ , return_tensors="np").input_values
a_ =feat_extract(lowerCAmelCase_ , return_tensors="np").input_values
for enc_seq_a, enc_seq_a in zip(lowerCAmelCase_ , lowerCAmelCase_):
self.assertTrue(np.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1e-3))
def lowercase_ ( self) -> List[Any]:
"""simple docstring"""
a_ =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
a_ =[floats_list((1, x))[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0)]
a_ =["longest", "max_length", "do_not_pad"]
a_ =[None, 1_6_0_0, None]
for max_length, padding in zip(lowerCAmelCase_ , lowerCAmelCase_):
a_ =feat_extract(lowerCAmelCase_ , padding=lowerCAmelCase_ , max_length=lowerCAmelCase_ , return_tensors="np")
a_ =processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_0_0])
self.assertTrue(input_values[0][8_0_0:].sum() < 1e-6)
self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0])
self.assertTrue(input_values[0][1_0_0_0:].sum() < 1e-6)
self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0])
def lowercase_ ( self) -> Tuple:
"""simple docstring"""
a_ =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
a_ =range(8_0_0 , 1_4_0_0 , 2_0_0)
a_ =[floats_list((1, x))[0] for x in lengths]
a_ =["longest", "max_length", "do_not_pad"]
a_ =[None, 1_6_0_0, None]
for max_length, padding in zip(lowerCAmelCase_ , lowerCAmelCase_):
a_ =feat_extract(lowerCAmelCase_ , max_length=lowerCAmelCase_ , padding=lowerCAmelCase_)
a_ =processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_0_0])
self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0])
self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0])
def lowercase_ ( self) -> Union[str, Any]:
"""simple docstring"""
a_ =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
a_ =[floats_list((1, x))[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0)]
a_ =feat_extract(
lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=1_0_0_0 , padding="max_length" , return_tensors="np")
a_ =processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0])
self._check_zero_mean_unit_variance(input_values[1])
self._check_zero_mean_unit_variance(input_values[2])
def lowercase_ ( self) -> Optional[int]:
"""simple docstring"""
a_ =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
a_ =[floats_list((1, x))[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0)]
a_ =feat_extract(
lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=1_0_0_0 , padding="longest" , return_tensors="np")
a_ =processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0])
self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0])
self._check_zero_mean_unit_variance(input_values[2])
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1_0_0_0))
a_ =[floats_list((1, x))[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0)]
a_ =feat_extract(
lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=2_0_0_0 , padding="longest" , return_tensors="np")
a_ =processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0])
self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0])
self._check_zero_mean_unit_variance(input_values[2])
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1_2_0_0))
def lowercase_ ( self) -> Tuple:
"""simple docstring"""
a_ =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
a_ =np.random.rand(1_0_0).astype(np.floataa)
a_ =np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
a_ =feature_extractor.pad([{"input_values": inputs}] , return_tensors="np")
self.assertTrue(np_processed.input_values.dtype == np.floataa)
a_ =feature_extractor.pad([{"input_values": inputs}] , return_tensors="pt")
self.assertTrue(pt_processed.input_values.dtype == torch.floataa)
def lowercase_ ( self) -> Optional[Any]:
"""simple docstring"""
a_ =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
# create three inputs of length 800, 1000, and 1200
a_ =[floats_list((1, x))[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0)]
a_ =[np.asarray(lowerCAmelCase_) for speech_input in speech_inputs]
# Test feature size
a_ =feature_extractor(audio_target=lowerCAmelCase_ , padding=lowerCAmelCase_ , return_tensors="np").input_values
self.assertTrue(input_values.ndim == 3)
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins)
# Test not batched input
a_ =feature_extractor(speech_inputs[0] , return_tensors="np").input_values
a_ =feature_extractor(np_speech_inputs[0] , return_tensors="np").input_values
self.assertTrue(np.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1e-3))
# Test batched
a_ =feature_extractor(lowerCAmelCase_ , return_tensors="np").input_values
a_ =feature_extractor(lowerCAmelCase_ , return_tensors="np").input_values
for enc_seq_a, enc_seq_a in zip(lowerCAmelCase_ , lowerCAmelCase_):
self.assertTrue(np.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1e-3))
# Test 2-D numpy arrays are batched.
a_ =[floats_list((1, x))[0] for x in (8_0_0, 8_0_0, 8_0_0)]
a_ =np.asarray(lowerCAmelCase_)
a_ =feature_extractor(lowerCAmelCase_ , return_tensors="np").input_values
a_ =feature_extractor(lowerCAmelCase_ , return_tensors="np").input_values
for enc_seq_a, enc_seq_a in zip(lowerCAmelCase_ , lowerCAmelCase_):
self.assertTrue(np.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1e-3))
def lowercase_ ( self) -> int:
"""simple docstring"""
a_ =self.feat_extract_tester.prepare_inputs_for_target()
a_ =self.feature_extraction_class(**self.feat_extract_dict)
a_ =feat_extract.model_input_names[0]
a_ =BatchFeature({input_name: speech_inputs})
self.assertTrue(all(len(lowerCAmelCase_) == len(lowerCAmelCase_) for x, y in zip(lowerCAmelCase_ , processed_features[input_name])))
a_ =self.feat_extract_tester.prepare_inputs_for_target(equal_length=lowerCAmelCase_)
a_ =BatchFeature({input_name: speech_inputs} , tensor_type="np")
a_ =processed_features[input_name]
if len(batch_features_input.shape) < 3:
a_ =batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins))
@require_torch
def lowercase_ ( self) -> Optional[int]:
"""simple docstring"""
a_ =self.feat_extract_tester.prepare_inputs_for_target(equal_length=lowerCAmelCase_)
a_ =self.feature_extraction_class(**self.feat_extract_dict)
a_ =feat_extract.model_input_names[0]
a_ =BatchFeature({input_name: speech_inputs} , tensor_type="pt")
a_ =processed_features[input_name]
if len(batch_features_input.shape) < 3:
a_ =batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins))
@require_torch
def lowercase_ ( self) -> Optional[Any]:
"""simple docstring"""
a_ =self.feature_extraction_class(**self.feat_extract_dict)
a_ =self.feat_extract_tester.prepare_inputs_for_target()
a_ =feat_extract.model_input_names[0]
a_ =BatchFeature({input_name: speech_inputs})
a_ =feat_extract.num_mel_bins # hack!
a_ =feat_extract.pad(lowerCAmelCase_ , padding="longest" , return_tensors="np")[input_name]
a_ =feat_extract.pad(lowerCAmelCase_ , padding="longest" , return_tensors="pt")[input_name]
self.assertTrue(abs(input_np.astype(np.floataa).sum() - input_pt.numpy().astype(np.floataa).sum()) < 1e-2)
def lowercase_ ( self) -> Dict:
"""simple docstring"""
a_ =self.feat_extract_dict
a_ =True
a_ =self.feature_extraction_class(**lowerCAmelCase_)
a_ =self.feat_extract_tester.prepare_inputs_for_target()
a_ =[len(lowerCAmelCase_) for x in speech_inputs]
a_ =feat_extract.model_input_names[0]
a_ =BatchFeature({input_name: speech_inputs})
a_ =feat_extract.num_mel_bins # hack!
a_ =feat_extract.pad(lowerCAmelCase_ , padding="longest" , return_tensors="np")
self.assertIn("attention_mask" , lowerCAmelCase_)
self.assertListEqual(list(processed.attention_mask.shape) , list(processed[input_name].shape[:2]))
self.assertListEqual(processed.attention_mask.sum(-1).tolist() , lowerCAmelCase_)
def lowercase_ ( self) -> Union[str, Any]:
"""simple docstring"""
a_ =self.feat_extract_dict
a_ =True
a_ =self.feature_extraction_class(**lowerCAmelCase_)
a_ =self.feat_extract_tester.prepare_inputs_for_target()
a_ =[len(lowerCAmelCase_) for x in speech_inputs]
a_ =feat_extract.model_input_names[0]
a_ =BatchFeature({input_name: speech_inputs})
a_ =min(lowerCAmelCase_)
a_ =feat_extract.num_mel_bins # hack!
a_ =feat_extract.pad(
lowerCAmelCase_ , padding="max_length" , max_length=lowerCAmelCase_ , truncation=lowerCAmelCase_ , return_tensors="np")
self.assertIn("attention_mask" , lowerCAmelCase_)
self.assertListEqual(
list(processed_pad.attention_mask.shape) , [processed_pad[input_name].shape[0], max_length])
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1).tolist() , [max_length for x in speech_inputs])
def lowercase_ ( self , lowerCAmelCase_) -> Union[str, Any]:
"""simple docstring"""
from datasets import load_dataset
a_ =load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation")
# automatic decoding with librispeech
a_ =ds.sort("id").select(range(lowerCAmelCase_))[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def lowercase_ ( self) -> Dict:
"""simple docstring"""
a_ =torch.tensor(
[2.3804e-03, 2.0752e-03, 1.9836e-03, 2.1057e-03, 1.6174e-03,
3.0518e-04, 9.1553e-05, 3.3569e-04, 9.7656e-04, 1.8311e-03,
2.0142e-03, 2.1057e-03, 1.7395e-03, 4.5776e-04, -3.9673e-04,
4.5776e-04, 1.0071e-03, 9.1553e-05, 4.8828e-04, 1.1597e-03,
7.3242e-04, 9.4604e-04, 1.8005e-03, 1.8311e-03, 8.8501e-04,
4.2725e-04, 4.8828e-04, 7.3242e-04, 1.0986e-03, 2.1057e-03])
# fmt: on
a_ =self._load_datasamples(1)
a_ =SpeechTaFeatureExtractor()
a_ =feature_extractor(lowerCAmelCase_ , return_tensors="pt").input_values
self.assertEquals(input_values.shape , (1, 9_3_6_8_0))
self.assertTrue(torch.allclose(input_values[0, :3_0] , lowerCAmelCase_ , atol=1e-6))
def lowercase_ ( self) -> Dict:
"""simple docstring"""
a_ =torch.tensor(
[-2.6_8_7_0, -3.0_1_0_4, -3.1_3_5_6, -3.5_3_5_2, -3.0_0_4_4, -3.0_3_5_3, -3.4_7_1_9, -3.6_7_7_7,
-3.1_5_2_0, -2.9_4_3_5, -2.6_5_5_3, -2.8_7_9_5, -2.9_9_4_4, -2.5_9_2_1, -3.0_2_7_9, -3.0_3_8_6,
-3.0_8_6_4, -3.1_2_9_1, -3.2_3_5_3, -2.7_4_4_4, -2.6_8_3_1, -2.7_2_8_7, -3.1_7_6_1, -3.1_5_7_1,
-3.2_7_2_6, -3.0_5_8_2, -3.1_0_0_7, -3.4_5_3_3, -3.4_6_9_5, -3.0_9_9_8])
# fmt: on
a_ =self._load_datasamples(1)
a_ =SpeechTaFeatureExtractor()
a_ =feature_extractor(audio_target=lowerCAmelCase_ , return_tensors="pt").input_values
self.assertEquals(input_values.shape , (1, 3_6_6, 8_0))
self.assertTrue(torch.allclose(input_values[0, 0, :3_0] , lowerCAmelCase_ , atol=1e-4))
'''simple docstring'''
from typing import Any
import numpy as np
def is_hermitian(matrix):
    '''simple docstring'''
    return np.array_equal(matrix, matrix.conjugate().T)
def rayleigh_quotient(a, v):
    '''simple docstring'''
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))
def tests():
    '''simple docstring'''
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), F"""{a} is not hermitian."""
    print(rayleigh_quotient(a, v))
    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), F"""{a} is not hermitian."""
    assert rayleigh_quotient(a, v) == float(3)
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
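# For a Hermitian matrix the Rayleigh quotient is real, and when v is an
# eigenvector it equals the corresponding eigenvalue (the second assert in
# tests() checks a quotient of exactly 3). Trivial 1x1 case:
#     >>> rayleigh_quotient(np.array([[2.0]]), np.array([[1.0]]))
#     array([[2.]])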
'''simple docstring'''
import sys
import turtle
def get_mid(pa, pb):
    '''simple docstring'''
    return (pa[0] + pb[0]) / 2, (pa[1] + pb[1]) / 2
def triangle(vertexa, vertexb, vertexc, depth):
    '''simple docstring'''
    my_pen.up()
    my_pen.goto(vertexa[0], vertexa[1])
    my_pen.down()
    my_pen.goto(vertexb[0], vertexb[1])
    my_pen.goto(vertexc[0], vertexc[1])
    my_pen.goto(vertexa[0], vertexa[1])
    if depth == 0:
        return
    triangle(vertexa, get_mid(vertexa, vertexb), get_mid(vertexa, vertexc), depth - 1)
    triangle(vertexb, get_mid(vertexa, vertexb), get_mid(vertexb, vertexc), depth - 1)
    triangle(vertexc, get_mid(vertexc, vertexb), get_mid(vertexa, vertexc), depth - 1)
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
'''Correct format for using this script: '''
'''python fractals.py <int:depth_for_fractal>'''
)
    my_pen = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor('''red''')
    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
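# Run sketch, per the usage string above; depth 4 already gives a
# recognizable Sierpinski triangle in the turtle window:
#     python fractals.py 4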
'''simple docstring'''
from __future__ import annotations
solution = []
def is_safe(board, row, column):
    '''simple docstring'''
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True
def solve(board, row):
    '''simple docstring'''
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False
def printboard(board):
    '''simple docstring'''
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()
# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('''The total no. of solutions are :''', len(solution))
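# Sanity check: for n = 8 the classic answer is 92 solutions, so len(solution)
# is 92. Note solve() appends the one mutable board object each time, so only
# the length of `solution` is meaningful, not its contents.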