"""simple docstring"""
from ..utils import DummyObject, requires_backends
class lowerCAmelCase ( metaclass=lowerCamelCase_ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Any = ["""note_seq"""]
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Union[str, Any]:
requires_backends(self , ['note_seq'] )
@classmethod
def __A ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> List[Any]:
requires_backends(cls , ['note_seq'] )
@classmethod
def __A ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Optional[Any]:
requires_backends(cls , ['note_seq'] )
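
# Added usage sketch (not part of the original module), assuming the restored
# `MidiProcessor` placeholder above: `requires_backends` raises an ImportError
# that explains how to install the missing backend, so the placeholder fails
# loudly the moment it is touched.
def _dummy_object_demo():
    try:
        MidiProcessor()  # any instantiation or classmethod call should raise
    except ImportError as err:
        print(f"As expected when `note_seq` is missing: {err}")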
"""simple docstring"""
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
__UpperCamelCase = logging.getLogger(__name__)
def lowercase (SCREAMING_SNAKE_CASE_ : int=2 , SCREAMING_SNAKE_CASE_ : Optional[int]=3 , SCREAMING_SNAKE_CASE_ : Optional[Any]=16 , SCREAMING_SNAKE_CASE_ : int = 10 , SCREAMING_SNAKE_CASE_ : int = 2 ) -> List[str]:
def get_dataset(SCREAMING_SNAKE_CASE_ : int ):
SCREAMING_SNAKE_CASE = torch.randn(batch_size * n_batches , 1 )
return TensorDataset(SCREAMING_SNAKE_CASE_ , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )
SCREAMING_SNAKE_CASE = get_dataset(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = get_dataset(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = DataLoader(SCREAMING_SNAKE_CASE_ , shuffle=SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ , num_workers=4 )
SCREAMING_SNAKE_CASE = DataLoader(SCREAMING_SNAKE_CASE_ , shuffle=SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ , num_workers=4 )
return (train_dataloader, valid_dataloader)
def lowercase (SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : List[Any]=None ) -> Any:
SCREAMING_SNAKE_CASE = []
for epoch in range(SCREAMING_SNAKE_CASE_ ):
# Train quickly
model.train()
for batch in dataloader:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = batch
SCREAMING_SNAKE_CASE = model(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = torch.nn.functional.mse_loss(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
accelerator.backward(SCREAMING_SNAKE_CASE_ )
optimizer.step()
optimizer.zero_grad()
rands.append(random.random() ) # Introduce some randomness
if scheduler is not None:
scheduler.step()
return rands
class lowerCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self ) -> Dict:
super().__init__()
SCREAMING_SNAKE_CASE = nn.Parameter(torch.randn(1 ) )
SCREAMING_SNAKE_CASE = nn.Parameter(torch.randn(1 ) )
def __A ( self , lowerCAmelCase__ ) -> Dict:
return x * self.a + self.b
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __A ( self ) -> int:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
SCREAMING_SNAKE_CASE = DummyModel()
SCREAMING_SNAKE_CASE = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = dummy_dataloaders()
SCREAMING_SNAKE_CASE = ProjectConfiguration(total_limit=1 , project_dir=lowerCAmelCase__ , automatic_checkpoint_naming=lowerCAmelCase__ )
# Train baseline
SCREAMING_SNAKE_CASE = Accelerator(project_config=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.prepare(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def __A ( self ) -> int:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
SCREAMING_SNAKE_CASE = DummyModel()
SCREAMING_SNAKE_CASE = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = dummy_dataloaders()
# Train baseline
SCREAMING_SNAKE_CASE = Accelerator()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.prepare(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Save initial
SCREAMING_SNAKE_CASE = os.path.join(lowerCAmelCase__ , 'initial' )
accelerator.save_state(lowerCAmelCase__ )
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE = optimizer.state_dict()
SCREAMING_SNAKE_CASE = train(3 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE = optimizer.state_dict()
# Train partially
set_seed(42 )
SCREAMING_SNAKE_CASE = DummyModel()
SCREAMING_SNAKE_CASE = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = dummy_dataloaders()
SCREAMING_SNAKE_CASE = Accelerator()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.prepare(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
accelerator.load_state(lowerCAmelCase__ )
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE = optimizer.state_dict()
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = train(2 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Save everything
SCREAMING_SNAKE_CASE = os.path.join(lowerCAmelCase__ , 'checkpoint' )
accelerator.save_state(lowerCAmelCase__ )
# Load everything back in and make sure all states work
accelerator.load_state(lowerCAmelCase__ )
test_rands += train(1 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE = optimizer.state_dict()
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def __A ( self ) -> str:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
SCREAMING_SNAKE_CASE = DummyModel()
SCREAMING_SNAKE_CASE = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = dummy_dataloaders()
SCREAMING_SNAKE_CASE = ProjectConfiguration(automatic_checkpoint_naming=lowerCAmelCase__ )
# Train baseline
SCREAMING_SNAKE_CASE = Accelerator(project_dir=lowerCAmelCase__ , project_config=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.prepare(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Save initial
accelerator.save_state()
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE = optimizer.state_dict()
SCREAMING_SNAKE_CASE = train(3 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE = optimizer.state_dict()
# Train partially
set_seed(42 )
SCREAMING_SNAKE_CASE = DummyModel()
SCREAMING_SNAKE_CASE = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = dummy_dataloaders()
SCREAMING_SNAKE_CASE = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = Accelerator(project_dir=lowerCAmelCase__ , project_config=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.prepare(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
accelerator.load_state(os.path.join(lowerCAmelCase__ , 'checkpoints' , 'checkpoint_0' ) )
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE = optimizer.state_dict()
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = train(2 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(lowerCAmelCase__ , 'checkpoints' , 'checkpoint_1' ) )
test_rands += train(1 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE = optimizer.state_dict()
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def __A ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE = torch.tensor([1, 2, 3] )
SCREAMING_SNAKE_CASE = torch.tensor([2, 3, 4] )
SCREAMING_SNAKE_CASE = DummyModel()
SCREAMING_SNAKE_CASE = torch.optim.Adam(net.parameters() )
SCREAMING_SNAKE_CASE = Accelerator()
with self.assertRaises(lowerCAmelCase__ ) as ve:
accelerator.register_for_checkpointing(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = str(ve.exception )
self.assertTrue('Item at index 0' in message )
self.assertTrue('Item at index 1' in message )
self.assertFalse('Item at index 2' in message )
self.assertFalse('Item at index 3' in message )
def __A ( self ) -> Union[str, Any]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
SCREAMING_SNAKE_CASE = DummyModel()
SCREAMING_SNAKE_CASE = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
SCREAMING_SNAKE_CASE = torch.optim.lr_scheduler.StepLR(lowerCAmelCase__ , step_size=1 , gamma=0.99 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = dummy_dataloaders()
SCREAMING_SNAKE_CASE = ProjectConfiguration(automatic_checkpoint_naming=lowerCAmelCase__ )
# Train baseline
SCREAMING_SNAKE_CASE = Accelerator(project_dir=lowerCAmelCase__ , project_config=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.prepare(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Save initial
accelerator.save_state()
SCREAMING_SNAKE_CASE = scheduler.state_dict()
train(3 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
self.assertNotEqual(lowerCAmelCase__ , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(lowerCAmelCase__ , 'checkpoints' , 'checkpoint_0' ) )
self.assertEqual(lowerCAmelCase__ , scheduler.state_dict() )
def __A ( self ) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
SCREAMING_SNAKE_CASE = DummyModel()
SCREAMING_SNAKE_CASE = ProjectConfiguration(automatic_checkpoint_naming=lowerCAmelCase__ , total_limit=2 )
# Train baseline
SCREAMING_SNAKE_CASE = Accelerator(project_dir=lowerCAmelCase__ , project_config=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = accelerator.prepare(lowerCAmelCase__ )
# Save 3 states:
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(lowerCAmelCase__ , 'checkpoints' , 'checkpoint_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , 'checkpoints' , 'checkpoint_9' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , 'checkpoints' , 'checkpoint_10' ) ) )
@require_cuda
def __A ( self ) -> Dict:
SCREAMING_SNAKE_CASE = ['torchrun', F'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )]
execute_subprocess_async(lowerCAmelCase__ , env=os.environ.copy() )
if __name__ == "__main__":
__UpperCamelCase = '''/tmp/accelerate/state_checkpointing'''
__UpperCamelCase = DummyModel()
__UpperCamelCase = torch.optim.Adam(params=model.parameters(), lr=1E-3)
__UpperCamelCase = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
__UpperCamelCase,__UpperCamelCase = dummy_dataloaders()
__UpperCamelCase = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
__UpperCamelCase = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='''no''')
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
__UpperCamelCase,__UpperCamelCase,__UpperCamelCase,__UpperCamelCase,__UpperCamelCase = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
__UpperCamelCase,__UpperCamelCase = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
# Check that the intial optimizer is loaded on the GPU
for group in optimizer.param_groups:
__UpperCamelCase = group['''params'''][0].device
break
assert param_device.type == accelerator.device.type
__UpperCamelCase = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, '''checkpoints''', '''checkpoint_0'''), map_location='''cpu''')
for group in optimizer.param_groups:
__UpperCamelCase = group['''params'''][0].device
break
assert (
param_device.type == torch.device('''cpu''').type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, '''checkpoints''', '''checkpoint_0'''), map_location='''on_device''')
for group in optimizer.param_groups:
__UpperCamelCase = group['''params'''][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='''Unsupported optimizer map location passed'''):
accelerator.load_state(os.path.join(savedir, '''checkpoints''', '''checkpoint_0'''), map_location='''invalid''')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
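

# Added usage sketch (not part of the original test file): the save/resume
# pattern the tests above exercise, reduced to its core. The temporary
# directory and epoch counts are illustrative only.
def _checkpoint_usage_demo():
    with tempfile.TemporaryDirectory() as tmpdir:
        set_seed(42)
        model = DummyModel()
        optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
        train_dataloader, _ = dummy_dataloaders()
        accelerator = Accelerator(
            project_dir=tmpdir, project_config=ProjectConfiguration(automatic_checkpoint_naming=True)
        )
        model, optimizer, train_dataloader = accelerator.prepare(model, optimizer, train_dataloader)
        accelerator.save_state()  # -> <tmpdir>/checkpoints/checkpoint_0
        train(1, model, train_dataloader, optimizer, accelerator)
        # Restores model weights, optimizer state, and RNG states:
        accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))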
import argparse
import json
import os
from collections import OrderedDict

import torch

from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken


@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")["module"]

    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path)
    # add an entry for [MASK2]
    entity_vocab["[MASK2]"] = max(entity_vocab.values()) + 1
    config.entity_vocab_size += 1

    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "r") as f:
        tokenizer_config = json.load(f)
    tokenizer_config["tokenizer_class"] = "MLukeTokenizer"
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "w") as f:
        json.dump(tokenizer_config, f)

    with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(["@"])[0]
    ent2_init_index = tokenizer.convert_tokens_to_ids(["#"])[0]

    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[ent_init_index].unsqueeze(0)
    ent2_emb = word_emb[ent2_init_index].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0)
        ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0)
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_mask_emb = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_embeddings.entity_embeddings.weight"] = torch.cat([entity_emb, entity_mask_emb])
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict["entity_predictions.bias"]
    entity_mask_bias = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_predictions.bias"] = torch.cat([entity_prediction_bias, entity_mask_bias])

    model = LukeForMaskedLM(config=config).eval()

    state_dict.pop("entity_predictions.decoder.weight")
    state_dict.pop("lm_head.decoder.weight")
    state_dict.pop("lm_head.decoder.bias")
    state_dict_for_hugging_face = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head") or key.startswith("entity_predictions")):
            state_dict_for_hugging_face[f"luke.{key}"] = state_dict[key]
        else:
            state_dict_for_hugging_face[key] = state_dict[key]

    missing_keys, unexpected_keys = model.load_state_dict(state_dict_for_hugging_face, strict=False)

    if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}")
    if set(missing_keys) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"Unexpected missing_keys: {missing_keys}")

    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()

    # Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    span = (0, 9)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 768))
        expected_slice = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    text = "Tokyo is the capital of <mask>."
    span = (24, 30)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    input_ids = encoding["input_ids"][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>"))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert "Japan" == tokenizer.decode(predicted_id)

    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:")][0] == "en:Japan"

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)


def load_original_entity_vocab(entity_vocab_path):
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]

    data = [json.loads(line) for line in open(entity_vocab_path)]

    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id
    return new_mapping


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
    parser.add_argument(
        "--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
    )
    parser.add_argument(
        "--entity_vocab_path",
        default=None,
        type=str,
        help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
    )
    parser.add_argument(
        "--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
    )
    args = parser.parse_args()
    convert_luke_checkpoint(
        args.checkpoint_path,
        args.metadata_path,
        args.entity_vocab_path,
        args.pytorch_dump_folder_path,
        args.model_size,
    )
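
# Added usage sketch (not part of the original script): a hypothetical
# command-line invocation. The script filename and all paths below are
# illustrative placeholders.
#
#   python convert_mluke_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path /path/to/pytorch_model.bin \
#       --metadata_path /path/to/metadata.json \
#       --entity_vocab_path /path/to/entity_vocab.jsonl \
#       --pytorch_dump_folder_path ./mluke-base-converted \
#       --model_size base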
import logging
import os
import threading
import time


try:
    import warnings
except ImportError:
    warnings = None

try:
    import msvcrt
except ImportError:
    msvcrt = None

try:
    import fcntl
except ImportError:
    fcntl = None


# Backward compatibility
# ------------------------------------------------
try:
    TimeoutError
except NameError:
    TimeoutError = OSError


# Data
# ------------------------------------------------
__all__ = [
    "Timeout",
    "BaseFileLock",
    "WindowsFileLock",
    "UnixFileLock",
    "SoftFileLock",
    "FileLock",
]

__version__ = "3.0.12"


_logger = None


def logger():
    """Returns the logger instance used in this module."""
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger


class Timeout(TimeoutError):
    """Raised when the lock could not be acquired in *timeout* seconds."""

    def __init__(self, lock_file):
        #: The path of the file lock.
        self.lock_file = lock_file
        return None

    def __str__(self):
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp


class _Acquire_ReturnProxy:
    """Wraps the acquired lock so ``with lock.acquire():`` releases it on exit."""

    def __init__(self, lock):
        self.lock = lock
        return None

    def __enter__(self):
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
        return None


class BaseFileLock:
    """Implements the base class of a file lock."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)
        # The path to the lock file.
        self._lock_file = lock_file

        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None

        # The default timeout value.
        self.timeout = timeout

        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()

        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None

    @property
    def lock_file(self):
        return self._lock_file

    @property
    def timeout(self):
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)
        return None

    # Platform-dependent locking, implemented by subclasses.
    def _acquire(self):
        raise NotImplementedError()

    def _release(self):
        raise NotImplementedError()

    @property
    def is_locked(self):
        return self._lock_file_fd is not None

    def acquire(self, timeout=None, poll_intervall=0.05):
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout

        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1

        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()

                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..."
                    )
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)

    def release(self, force=False):
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1

                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file

                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}")
        return None

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return None

    def __del__(self):
        self.release(force=True)
        return None

    def hash_filename_if_too_long(self, path, max_length):
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return path


class WindowsFileLock(BaseFileLock):
    """Uses :func:`msvcrt.locking` to hard lock the lock file on Windows systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)

        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None


class UnixFileLock(BaseFileLock):
    """Uses :func:`fcntl.flock` to hard lock the lock file on Unix systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)

        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        # Do not remove the lockfile; another process may be waiting on it.
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None


class SoftFileLock(BaseFileLock):
    """Simply watches the existence of the lock file."""

    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None

        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None


# Pick the platform-appropriate lock implementation.
FileLock = None

if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock

    if warnings is not None:
        warnings.warn("only soft file lock is available")
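

# Added usage sketch (not part of the original module): the typical pattern.
# The lock path is illustrative; `FileLock` is whichever platform class was
# selected above, and the context manager acquires on entry and releases on
# exit.
def _filelock_demo(path="/tmp/example.txt.lock"):
    lock = FileLock(path, timeout=5)
    try:
        with lock:
            pass  # critical section: only one process holds the lock here
    except Timeout:
        print(f"Could not acquire {path} within 5 seconds")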
from manim import *


class Stage3(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        meta_mem = Rectangle(height=0.25, width=0.25)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)

        model_arr = []
        model_cpu_arr = []
        model_meta_arr = []

        for i, rect in enumerate(model_base):
            rect.set_stroke(YELLOW)
            cpu_target = Rectangle(height=0.46 / 4, width=0.46 / 3).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)

            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.set_x(cpu_target.get_x() + 0.1)
            elif i == 3:
                cpu_target.next_to(model_cpu_arr[0], direction=UP, buff=0.0)
            else:
                cpu_target.next_to(model_cpu_arr[i - 1], direction=RIGHT, buff=0.0)
            self.add(cpu_target)
            model_cpu_arr.append(cpu_target)

        self.add(*model_arr, *model_cpu_arr, *model_meta_arr)

        checkpoint_base = [mem.copy() for i in range(6)]
        checkpoint_rect = VGroup(*checkpoint_base).arrange(RIGHT, buff=0)
        checkpoint_text = Text("Loaded Checkpoint", font_size=24)
        checkpoint = Group(checkpoint_rect, checkpoint_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        checkpoint.move_to([3, 0.5, 0])
        self.add(checkpoint)

        ckpt_arr = []
        ckpt_cpu_arr = []

        for i, rect in enumerate(checkpoint_base):
            target = fill.copy().set_fill(BLUE, opacity=0.7)
            target.move_to(rect)
            ckpt_arr.append(target)

            cpu_target = target.copy()
            if i < 5:
                cpu_target.move_to(cpu_left_col_base[i + 1])
            else:
                cpu_target.move_to(cpu_right_col_base[i - 5])
            ckpt_cpu_arr.append(cpu_target)
        self.add(*ckpt_arr, *ckpt_cpu_arr)

        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])

        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])
        self.add(key_text, key)

        blue_text = MarkupText(
            f"<span fgcolor='{BLUE}'>●</span> Checkpoint",
            font_size=18,
        )
        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())
        self.add(blue_text)

        step_3 = MarkupText(
            f"Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.",
            font_size=24,
        )
        step_3.move_to([2, 2, 0])

        disk_left_col_base = [meta_mem.copy() for i in range(6)]
        disk_right_col_base = [meta_mem.copy() for i in range(6)]
        disk_left_col = VGroup(*disk_left_col_base).arrange(UP, buff=0)
        disk_right_col = VGroup(*disk_right_col_base).arrange(UP, buff=0)
        disk_rects = VGroup(disk_left_col, disk_right_col).arrange(RIGHT, buff=0)
        disk_text = Text("Disk", font_size=24)
        disk = Group(disk_rects, disk_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        disk.move_to([-4.0, -1.25, 0])
        self.play(Write(step_3, run_time=3), Write(disk_text, run_time=1), Create(disk_rects, run_time=1))

        animations = []
        for i, rect in enumerate(ckpt_cpu_arr):
            target = rect.copy()
            target.generate_target()
            target.target.move_to(disk_left_col_base[i]).scale(0.5)
            animations.append(MoveToTarget(target, run_time=1.5))
        self.play(*animations)

        self.play(FadeOut(step_3))

        step_4 = MarkupText(f"Then, the checkpoint is removed from memory\nthrough garbage collection.", font_size=24)
        step_4.move_to([2, 2, 0])

        self.play(Write(step_4, run_time=3))

        self.play(
            FadeOut(checkpoint_rect, checkpoint_text, *ckpt_arr, *ckpt_cpu_arr),
        )
        self.wait()
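
# Added usage note (not part of the original scene): manim scenes are rendered
# from the command line. The file name and quality flag below are illustrative
# only; `Stage3` is the class name used in the restoration above.
#
#   manim -pql stage_3.py Stage3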
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MRA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}


class MraConfig(PretrainedConfig):
    model_type = "mra"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        position_embedding_type="absolute",
        block_per_row=4,
        approx_mode="full",
        initial_prior_first_n_blocks=0,
        initial_prior_diagonal_n_blocks=0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
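

# Added usage sketch (not part of the original file): constructing the config
# and reading a few fields. Values printed are the defaults defined above,
# except for the one overridden argument.
def _mra_config_demo():
    config = MraConfig(num_hidden_layers=6)  # override one default
    print(config.model_type)         # "mra"
    print(config.hidden_size)        # 768
    print(config.num_hidden_layers)  # 6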
"""simple docstring"""
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Tuple:
a__: Optional[int] = []
for part_id in partition_order:
a__: List[Any] = df.where(F'SPARK_PARTITION_ID() = {part_id}' ).collect()
for row_idx, row in enumerate(snake_case_ ):
expected_row_ids_and_row_dicts.append((F'{part_id}_{row_idx}', row.asDict()) )
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def __a ( ) ->Union[str, Any]:
a__: Tuple = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
a__: Union[str, Any] = spark.range(100 ).repartition(1 )
a__: Any = Spark(snake_case_ )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def __a ( ) ->Tuple:
a__: Dict = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
a__: Optional[Any] = spark.range(10 ).repartition(2 )
a__: Optional[Any] = [1, 0]
a__: Dict = _generate_iterable_examples(snake_case_ , snake_case_ ) # Reverse the partitions.
a__: Tuple = _get_expected_row_ids_and_row_dicts_for_partition_order(snake_case_ , snake_case_ )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
a__: Tuple = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def __a ( ) ->Optional[Any]:
a__: Optional[int] = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
a__: Optional[int] = spark.range(10 ).repartition(1 )
a__: Union[str, Any] = SparkExamplesIterable(snake_case_ )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(snake_case_ ):
assert row_id == F'0_{i}'
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def __a ( ) ->Optional[int]:
a__: Optional[int] = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
a__: str = spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch('numpy.random.Generator' ) as generator_mock:
a__: Union[str, Any] = lambda _SCREAMING_SNAKE_CASE : x.reverse()
a__: Optional[int] = _get_expected_row_ids_and_row_dicts_for_partition_order(snake_case_ , [2, 1, 0] )
a__: List[Any] = SparkExamplesIterable(snake_case_ ).shuffle_data_sources(snake_case_ )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(snake_case_ ):
a__: Optional[Any] = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def __a ( ) ->Union[str, Any]:
a__: Any = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
a__: Tuple = spark.range(20 ).repartition(4 )
# Partitions 0 and 2
a__: List[Any] = SparkExamplesIterable(snake_case_ ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
a__: List[str] = _get_expected_row_ids_and_row_dicts_for_partition_order(snake_case_ , [0, 2] )
for i, (row_id, row_dict) in enumerate(snake_case_ ):
a__: Optional[int] = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
a__: Any = SparkExamplesIterable(snake_case_ ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
a__: List[Any] = _get_expected_row_ids_and_row_dicts_for_partition_order(snake_case_ , [1, 3] )
for i, (row_id, row_dict) in enumerate(snake_case_ ):
a__: Optional[Any] = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def __a ( ) ->List[str]:
a__: Dict = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
a__: Tuple = spark.range(100 ).repartition(1 )
a__: Union[str, Any] = Spark(snake_case_ )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 100
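

# Added usage sketch (not part of the original tests): the public entry point
# backed by the Spark builder exercised above is `Dataset.from_spark`, which
# recent `datasets` releases expose; treat the exact call as an assumption.
def _spark_to_dataset_demo():
    from datasets import Dataset

    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("demo").getOrCreate()
    df = spark.range(100)
    ds = Dataset.from_spark(df)  # materializes the DataFrame as a Dataset
    print(len(ds))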
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
lowercase__ = TypeVar('T')
class __snake_case ( Generic[T] ):
def __init__( self , lowercase) -> List[Any]:
'''simple docstring'''
a__: Union[str, Any] = data
a__: Node[T] | None = None
def __str__( self) -> str:
'''simple docstring'''
return f'{self.data}'
class __snake_case ( Generic[T] ):
def __init__( self) -> None:
'''simple docstring'''
a__: Node[T] | None = None
def __iter__( self) -> Iterator[T]:
'''simple docstring'''
a__: Union[str, Any] = self.top
while node:
yield node.data
a__: Optional[Any] = node.next
def __str__( self) -> str:
'''simple docstring'''
return "->".join([str(lowercase) for item in self])
def __len__( self) -> int:
'''simple docstring'''
return len(tuple(iter(self)))
def lowerCamelCase_ ( self) -> bool:
'''simple docstring'''
return self.top is None
def lowerCamelCase_ ( self , lowercase) -> None:
'''simple docstring'''
a__: Any = Node(lowercase)
if not self.is_empty():
a__: str = self.top
a__: Optional[int] = node
def lowerCamelCase_ ( self) -> T:
'''simple docstring'''
if self.is_empty():
raise IndexError('pop from empty stack')
assert isinstance(self.top , lowercase)
a__: Tuple = self.top
a__: List[Any] = self.top.next
return pop_node.data
def lowerCamelCase_ ( self) -> T:
'''simple docstring'''
if self.is_empty():
raise IndexError('peek from empty stack')
assert self.top is not None
return self.top.data
def lowerCamelCase_ ( self) -> None:
'''simple docstring'''
a__: Any = None
if __name__ == "__main__":
from doctest import testmod
testmod()
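
# Added usage sketch (not part of the original file), exercising the stack API
# defined above.
def _linked_stack_demo():
    stack = LinkedStack[int]()
    for value in (1, 2, 3):
        stack.push(value)
    print(stack)         # 3->2->1
    print(stack.peek())  # 3
    print(stack.pop())   # 3
    print(len(stack))    # 2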
import argparse
import logging
import os
import re

import tensorflow as tf

from transformers import (
    AutoConfig,
    AutoTokenizer,
    DataCollatorForLanguageModeling,
    PushToHubCallback,
    TFAutoModelForMaskedLM,
    create_optimizer,
)


logger = logging.getLogger(__name__)

AUTO = tf.data.AUTOTUNE


def parse_args():
    parser = argparse.ArgumentParser(description="Train a masked language model on TPU.")
    parser.add_argument(
        "--pretrained_model_config",
        type=str,
        default="roberta-base",
        help="The model config to use. Note that we don't copy the model's weights, only the config!",
    )
    parser.add_argument(
        "--tokenizer",
        type=str,
        default="unigram-tokenizer-wikitext",
        help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.",
    )
    parser.add_argument(
        "--per_replica_batch_size",
        type=int,
        default=8,
        help="Batch size per TPU core.",
    )
    parser.add_argument(
        "--no_tpu",
        action="store_true",
        help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.",
    )
    parser.add_argument(
        "--tpu_name",
        type=str,
        help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.",
        default="local",
    )
    parser.add_argument(
        "--tpu_zone",
        type=str,
        help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.",
    )
    parser.add_argument(
        "--gcp_project", type=str, help="Google cloud project name. Only used for non-Colab TPU nodes."
    )
    parser.add_argument(
        "--bfloat16",
        action="store_true",
        help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.",
    )
    parser.add_argument(
        "--train_dataset",
        type=str,
        help="Path to training dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument(
        "--shuffle_buffer_size",
        type=int,
        default=2**18,
        help="Size of the shuffle buffer (in samples)",
    )
    parser.add_argument(
        "--eval_dataset",
        type=str,
        help="Path to evaluation dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=1,
        help="Number of epochs to train for.",
    )
    parser.add_argument(
        "--learning_rate",
        type=float,
        default=1e-4,
        help="Learning rate to use for training.",
    )
    parser.add_argument(
        "--weight_decay_rate",
        type=float,
        default=1e-3,
        help="Weight decay rate to use for training.",
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py",
    )
    parser.add_argument(
        "--mlm_probability",
        type=float,
        default=0.15,
        help="Fraction of tokens to mask during training.",
    )
    parser.add_argument("--output_dir", type=str, required=True, help="Path to save model checkpoints to.")
    parser.add_argument("--hub_model_id", type=str, help="Model ID to upload to on the Hugging Face Hub.")

    args = parser.parse_args()
    return args


def initialize_tpu(args):
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name, zone=args.tpu_zone, project=args.gcp_project
            )
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
            "--gcp_project. When running on a TPU VM, use --tpu_name local."
        )

    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)

    return tpu


def count_samples(file_list):
    num_samples = 0
    for file in file_list:
        filename = file.split("/")[-1]
        sample_count = re.search(r"-\d+-(\d+)\.tfrecord", filename).group(1)
        sample_count = int(sample_count)
        num_samples += sample_count

    return num_samples


def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None):
    num_samples = count_samples(records)
    dataset = tf.data.Dataset.from_tensor_slices(records)
    if shuffle:
        dataset = dataset.shuffle(len(records))
    dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=AUTO)
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples))
    dataset = dataset.map(decode_fn, num_parallel_calls=AUTO)
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(shuffle_buffer_size)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.map(mask_fn, num_parallel_calls=AUTO)
    dataset = dataset.prefetch(AUTO)
    return dataset


def main(args):
    if not args.no_tpu:
        tpu = initialize_tpu(args)
        strategy = tf.distribute.TPUStrategy(tpu)
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")

    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("mixed_bfloat16")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    config = AutoConfig.from_pretrained(args.pretrained_model_config)
    config.vocab_size = tokenizer.vocab_size

    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, "*.tfrecord"))
    if not training_records:
        raise ValueError(f"No .tfrecord files found in {args.train_dataset}.")
    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, "*.tfrecord"))
    if not eval_records:
        raise ValueError(f"No .tfrecord files found in {args.eval_dataset}.")

    num_train_samples = count_samples(training_records)

    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs

    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config)
        model(model.dummy_inputs)  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer, schedule = create_optimizer(
            num_train_steps=total_train_steps,
            num_warmup_steps=total_train_steps // 20,
            init_lr=args.learning_rate,
            weight_decay_rate=args.weight_decay_rate,
        )

        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, metrics=["accuracy"])

    def decode_fn(example):
        features = {
            "input_ids": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
            "attention_mask": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
        }
        return tf.io.parse_single_example(example, features)

    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors="tf"
    )

    def mask_with_collator(batch):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch["attention_mask"], tf.bool)
            | (batch["input_ids"] == tokenizer.cls_token_id)
            | (batch["input_ids"] == tokenizer.sep_token_id)
        )
        batch["input_ids"], batch["labels"] = data_collator.tf_mask_tokens(
            batch["input_ids"],
            vocab_size=len(tokenizer),
            mask_token_id=tokenizer.mask_token_id,
            special_tokens_mask=special_tokens_mask,
        )
        return batch

    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync

    train_dataset = prepare_dataset(
        training_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=True,
        shuffle_buffer_size=args.shuffle_buffer_size,
    )

    eval_dataset = prepare_dataset(
        eval_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=False,
    )

    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer)
        )

    model.fit(
        train_dataset,
        validation_data=eval_dataset,
        epochs=args.num_epochs,
        callbacks=callbacks,
    )

    model.save_pretrained(args.output_dir)


if __name__ == "__main__":
    args = parse_args()
    main(args)
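
# Added usage sketch (not part of the original script): a hypothetical TPU-VM
# invocation. The script name, dataset paths, and output directory below are
# illustrative placeholders.
#
#   python run_mlm.py \
#       --tokenizer unigram-tokenizer-wikitext \
#       --pretrained_model_config roberta-base \
#       --train_dataset gs://my-bucket/train/ \
#       --eval_dataset gs://my-bucket/eval/ \
#       --per_replica_batch_size 8 \
#       --bfloat16 \
#       --output_dir ./mlm-checkpoints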
import torch

from diffusers import DDPMScheduler

from .test_schedulers import SchedulerCommonTest


class DDPMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3

    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
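

def _demo_denoising_loop():
    # Added sketch, not part of the test file: the loop pattern the full-loop
    # tests above exercise, with random noise standing in for a real model.
    scheduler = DDPMScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(50)
    generator = torch.manual_seed(0)
    sample = torch.randn(1, 3, 8, 8)
    for t in scheduler.timesteps:  # descending timesteps
        residual = torch.randn_like(sample)  # stand-in for model(sample, t)
        sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
    return sample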
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class JukeboxTokenizationTest(unittest.TestCase):
    tokenizer_class = JukeboxTokenizer
    metas = {
"""artist""": """Zac Brown Band""",
"""genres""": """Country""",
"""lyrics""": """I met a traveller from an antique land,
Who said \"Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
""",
}
@require_torch
    def test_1b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
    def test_5b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
from __future__ import annotations

from collections.abc import Callable


def trapezoidal_area(
    fnc: Callable[[float], float],
    x_start: float,
    x_end: float,
    steps: int = 100,
) -> float:
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area


if __name__ == "__main__":

    def f(x: float) -> float:
        return x**3 + x**2

    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 10
    while i <= 100_000:
        print(f"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
        i *= 10
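
    # Analytic spot check (added note, not part of the original file): the loop
    # above approximates the *unsigned* area, so for f(x) = x^3 + x^2 the printed
    # values converge to
    #   -integral of f on [-5, -1]  +  integral of f on [-1, 5]  =  938/3  ~  312.67
    assert abs(trapezoidal_area(f, -5, 5, 100_000) - 938 / 3) < 0.1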
import unittest

from transformers import (
    MODEL_FOR_CAUSAL_LM_MAPPING,
    TF_MODEL_FOR_CAUSAL_LM_MAPPING,
    TextGenerationPipeline,
    logging,
    pipeline,
)
from transformers.testing_utils import (
    CaptureLogger,
    is_pipeline_test,
    require_accelerate,
    require_tf,
    require_torch,
    require_torch_gpu,
    require_torch_or_tf,
)

from .test_pipelines_common import ANY


@is_pipeline_test
@require_torch_or_tf
class TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_CAUSAL_LM_MAPPING

    @require_torch
    def test_small_model_pt(self):
        text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="pt")
        # Using `do_sample=False` to force deterministic output
        outputs = text_generator("This is a test", do_sample=False)
        self.assertEqual(
            outputs,
            [
                {
                    "generated_text": (
                        "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
                        " oscope. FiliFili@@"
                    )
                }
            ],
        )

        outputs = text_generator(["This is a test", "This is a second test"])
        self.assertEqual(
            outputs,
            [
                [
                    {
                        "generated_text": (
                            "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
                            " oscope. FiliFili@@"
                        )
                    }
                ],
                [
                    {
                        "generated_text": (
                            "This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"
                            " oscope. oscope. FiliFili@@"
                        )
                    }
                ],
            ],
        )

        outputs = text_generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(list)},
                {"generated_token_ids": ANY(list)},
            ],
        )

        text_generator.tokenizer.pad_token_id = text_generator.model.config.eos_token_id
        text_generator.tokenizer.pad_token = "<pad>"
        outputs = text_generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(list)},
                    {"generated_token_ids": ANY(list)},
                ],
                [
                    {"generated_token_ids": ANY(list)},
                    {"generated_token_ids": ANY(list)},
                ],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="tf")

        # Using `do_sample=False` to force deterministic output
        outputs = text_generator("This is a test", do_sample=False)
        self.assertEqual(
            outputs,
            [
                {
                    "generated_text": (
                        "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
                        " please,"
                    )
                }
            ],
        )

        outputs = text_generator(["This is a test", "This is a second test"], do_sample=False)
        self.assertEqual(
            outputs,
            [
                [
                    {
                        "generated_text": (
                            "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
                            " please,"
                        )
                    }
                ],
                [
                    {
                        "generated_text": (
                            "This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"
                            " Cannes 閲閲Cannes Cannes Cannes 攵 please,"
                        )
                    }
                ],
            ],
        )

    def get_test_pipeline(self, model, tokenizer, processor):
        text_generator = TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return text_generator, ["This is a test", "Another test"]

    def test_stop_sequence_stopping_criteria(self):
        prompt = """Hello I believe in"""
        text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
        output = text_generator(prompt)
        self.assertEqual(
            output,
            [{"generated_text": "Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"}],
        )

        output = text_generator(prompt, stop_sequence=" fe")
        self.assertEqual(output, [{"generated_text": "Hello I believe in fe"}])

    def run_pipeline_test(self, text_generator, _):
        model = text_generator.model
        tokenizer = text_generator.tokenizer

        outputs = text_generator("This is a test")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertTrue(outputs[0]["generated_text"].startswith("This is a test"))

        outputs = text_generator("This is a test", return_full_text=False)
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertNotIn("This is a test", outputs[0]["generated_text"])

        text_generator = pipeline(task="text-generation", model=model, tokenizer=tokenizer, return_full_text=False)
        outputs = text_generator("This is a test")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertNotIn("This is a test", outputs[0]["generated_text"])

        outputs = text_generator("This is a test", return_full_text=True)
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertTrue(outputs[0]["generated_text"].startswith("This is a test"))

        outputs = text_generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        if text_generator.tokenizer.pad_token is not None:
            outputs = text_generator(
                ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
            )
            self.assertEqual(
                outputs,
                [
                    [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                    [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                ],
            )

        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_full_text=True, return_text=True)
        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_full_text=True, return_tensors=True)
        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_text=True, return_tensors=True)

        # Empty prompt is slightly special:
        # it requires the BOS token to exist.
        # Special case for Pegasus which will always append EOS so will
        # work even without BOS.
        if (
            text_generator.tokenizer.bos_token_id is not None
            or "Pegasus" in tokenizer.__class__.__name__
            or "Git" in model.__class__.__name__
        ):
            outputs = text_generator("")
            self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        else:
            with self.assertRaises((ValueError, AssertionError)):
                outputs = text_generator("")

        if text_generator.framework == "tf":
            # TF generation does not support max_new_tokens, and it's impossible
            # to control long generation with only max_length without
            # fancy calculation, dismissing tests for now.
            return

        # We don't care about infinite range models.
        # They already work.
        # Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
        EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS = ["RwkvForCausalLM", "XGLMForCausalLM", "GPTNeoXForCausalLM"]
        if (
            tokenizer.model_max_length < 10000
            and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
        ):
            # Handling of large generations
            with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError)):
                text_generator("This is a test" * 500, max_new_tokens=20)

            outputs = text_generator("This is a test" * 500, handle_long_generation="hole", max_new_tokens=20)
            # Hole strategy cannot work
            with self.assertRaises(ValueError):
                text_generator(
                    "This is a test" * 500,
                    handle_long_generation="hole",
                    max_new_tokens=tokenizer.model_max_length + 10,
                )

    @require_torch
    @require_accelerate
    @require_torch_gpu
    def test_small_model_pt_bloom_accelerate(self):
        import torch

        # Classic `model_kwargs`
        pipe = pipeline(
            model="hf-internal-testing/tiny-random-bloom",
            model_kwargs={"device_map": "auto", "torch_dtype": torch.bfloat16},
        )
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )

        # Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.bfloat16)
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )

        # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto")
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.float32)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )

    @require_torch
    @require_torch_gpu
    def test_small_model_fp16(self):
        import torch

        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device=0, torch_dtype=torch.float16)
        pipe("This is a test")

    @require_torch
    @require_accelerate
    @require_torch_gpu
    def test_pipeline_accelerate_top_p(self):
        import torch

        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.float16)
        pipe("This is a test", do_sample=True, top_p=0.5)

    def test_pipeline_length_setting_warning(self):
        prompt = """Hello world"""
        text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
        if text_generator.model.framework == "tf":
            logger = logging.get_logger("transformers.generation.tf_utils")
        else:
            logger = logging.get_logger("transformers.generation.utils")
        logger_msg = "Both `max_new_tokens`"  # The beginning of the message to be checked in this test

        # Both are set by the user -> log warning
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_length=10, max_new_tokens=1)
        self.assertIn(logger_msg, cl.out)

        # The user only sets one -> no warning
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_new_tokens=1)
        self.assertNotIn(logger_msg, cl.out)

        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_length=10)
        self.assertNotIn(logger_msg, cl.out)
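

def _demo_text_generation():
    # Added sketch, not part of the test file: the minimal happy path these
    # tests pin down, using the same tiny test checkpoint.
    generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
    return generator("Hello I believe in", max_new_tokens=5)[0]["generated_text"]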
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ =logging.get_logger()
def _a ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = True ) -> Union[str, Any]:
print(f"""Converting {name}...""" )
with torch.no_grad():
if hidden_sizes == 1_28:
if name[-1] == "S":
__SCREAMING_SNAKE_CASE = timm.create_model('''levit_128s''' , pretrained=UpperCAmelCase__ )
else:
__SCREAMING_SNAKE_CASE = timm.create_model('''levit_128''' , pretrained=UpperCAmelCase__ )
if hidden_sizes == 1_92:
__SCREAMING_SNAKE_CASE = timm.create_model('''levit_192''' , pretrained=UpperCAmelCase__ )
if hidden_sizes == 2_56:
__SCREAMING_SNAKE_CASE = timm.create_model('''levit_256''' , pretrained=UpperCAmelCase__ )
if hidden_sizes == 3_84:
__SCREAMING_SNAKE_CASE = timm.create_model('''levit_384''' , pretrained=UpperCAmelCase__ )
from_model.eval()
__SCREAMING_SNAKE_CASE = LevitForImageClassificationWithTeacher(UpperCAmelCase__ ).eval()
__SCREAMING_SNAKE_CASE = OrderedDict()
__SCREAMING_SNAKE_CASE = from_model.state_dict()
__SCREAMING_SNAKE_CASE = list(from_model.state_dict().keys() )
__SCREAMING_SNAKE_CASE = list(our_model.state_dict().keys() )
print(len(UpperCAmelCase__ ) , len(UpperCAmelCase__ ) )
for i in range(len(UpperCAmelCase__ ) ):
__SCREAMING_SNAKE_CASE = weights[og_keys[i]]
our_model.load_state_dict(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = torch.randn((2, 3, 2_24, 2_24) )
__SCREAMING_SNAKE_CASE = from_model(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = our_model(UpperCAmelCase__ ).logits
assert torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ ), "The model logits don't match the original one."
__SCREAMING_SNAKE_CASE = name
print(UpperCAmelCase__ )
if push_to_hub:
our_model.save_pretrained(save_directory / checkpoint_name )
__SCREAMING_SNAKE_CASE = LevitImageProcessor()
image_processor.save_pretrained(save_directory / checkpoint_name )
print(f"""Pushed {checkpoint_name}""" )
def _a ( UpperCAmelCase__ , UpperCAmelCase__ = None , UpperCAmelCase__ = True ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE = '''imagenet-1k-id2label.json'''
__SCREAMING_SNAKE_CASE = 10_00
__SCREAMING_SNAKE_CASE = (1, num_labels)
__SCREAMING_SNAKE_CASE = '''huggingface/label-files'''
__SCREAMING_SNAKE_CASE = num_labels
__SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(UpperCAmelCase__ , UpperCAmelCase__ , repo_type='''dataset''' ) , '''r''' ) )
__SCREAMING_SNAKE_CASE = {int(UpperCAmelCase__ ): v for k, v in idalabel.items()}
__SCREAMING_SNAKE_CASE = idalabel
__SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()}
__SCREAMING_SNAKE_CASE = partial(UpperCAmelCase__ , num_labels=UpperCAmelCase__ , idalabel=UpperCAmelCase__ , labelaid=UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = {
'''levit-128S''': 1_28,
'''levit-128''': 1_28,
'''levit-192''': 1_92,
'''levit-256''': 2_56,
'''levit-384''': 3_84,
}
__SCREAMING_SNAKE_CASE = {
'''levit-128S''': ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'''levit-128''': ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'''levit-192''': ImageNetPreTrainedConfig(
hidden_sizes=[1_92, 2_88, 3_84] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'''levit-256''': ImageNetPreTrainedConfig(
hidden_sizes=[2_56, 3_84, 5_12] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'''levit-384''': ImageNetPreTrainedConfig(
hidden_sizes=[3_84, 5_12, 7_68] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
if model_name:
convert_weight_and_push(
names_to_hidden_sizes[model_name] , UpperCAmelCase__ , names_to_config[model_name] , UpperCAmelCase__ , UpperCAmelCase__ )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(names_to_hidden_sizes[model_name] , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
return config, expected_shape
if __name__ == "__main__":
lowerCAmelCase__ =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help="The name of the model you wish to convert, it must be one of the supported Levit* architecture,",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="levit-dump-folder/",
type=Path,
required=False,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
parser.add_argument(
"--no-push_to_hub",
dest="push_to_hub",
action="store_false",
help="Do not push model and image processor to the hub",
)
lowerCAmelCase__ =parser.parse_args()
lowerCAmelCase__ =args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
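
    # Example invocation (added note; assumes this script is saved as
    # convert_levit_timm_to_pytorch.py, which is not stated above):
    #   python convert_levit_timm_to_pytorch.py --model_name levit-128S \
    #       --pytorch_dump_folder_path levit-dump-folder/ --push_to_hub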
from math import pi, sqrt, tan


def surface_area_cube(side_length: float) -> float:
    if side_length < 0:
        raise ValueError("surface_area_cube() only accepts non-negative values")
    return 6 * side_length**2


def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError("surface_area_cuboid() only accepts non-negative values")
    return 2 * ((length * breadth) + (breadth * height) + (length * height))


def surface_area_sphere(radius: float) -> float:
    if radius < 0:
        raise ValueError("surface_area_sphere() only accepts non-negative values")
    return 4 * pi * radius**2


def surface_area_hemisphere(radius: float) -> float:
    if radius < 0:
        raise ValueError("surface_area_hemisphere() only accepts non-negative values")
    return 3 * pi * radius**2


def surface_area_cone(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cone() only accepts non-negative values")
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)


def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError("surface_area_conical_frustum() only accepts non-negative values")
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)


def surface_area_cylinder(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cylinder() only accepts non-negative values")
    return 2 * pi * radius * (height + radius)


def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError("surface_area_torus() only accepts non-negative values")
    if torus_radius < tube_radius:
        raise ValueError("surface_area_torus() does not support spindle or self intersecting tori")
    return 4 * pow(pi, 2) * torus_radius * tube_radius


def area_rectangle(length: float, width: float) -> float:
    if length < 0 or width < 0:
        raise ValueError("area_rectangle() only accepts non-negative values")
    return length * width


def area_square(side_length: float) -> float:
    if side_length < 0:
        raise ValueError("area_square() only accepts non-negative values")
    return side_length**2


def area_triangle(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError("area_triangle() only accepts non-negative values")
    return (base * height) / 2


def area_triangle_three_sides(side1: float, side2: float, side3: float) -> float:
    if side1 < 0 or side2 < 0 or side3 < 0:
        raise ValueError("area_triangle_three_sides() only accepts non-negative values")
    elif side1 + side2 < side3 or side1 + side3 < side2 or side2 + side3 < side1:
        raise ValueError("Given three sides do not form a triangle")
    # Heron's formula: area = sqrt(s * (s - a) * (s - b) * (s - c))
    semi_perimeter = (side1 + side2 + side3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side1)
        * (semi_perimeter - side2)
        * (semi_perimeter - side3)
    )
    return area


def area_parallelogram(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError("area_parallelogram() only accepts non-negative values")
    return base * height


def area_trapezium(base_1: float, base_2: float, height: float) -> float:
    if base_1 < 0 or base_2 < 0 or height < 0:
        raise ValueError("area_trapezium() only accepts non-negative values")
    return 1 / 2 * (base_1 + base_2) * height


def area_circle(radius: float) -> float:
    if radius < 0:
        raise ValueError("area_circle() only accepts non-negative values")
    return pi * radius**2


def area_ellipse(radius_x: float, radius_y: float) -> float:
    if radius_x < 0 or radius_y < 0:
        raise ValueError("area_ellipse() only accepts non-negative values")
    return pi * radius_x * radius_y


def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError("area_rhombus() only accepts non-negative values")
    return 1 / 2 * diagonal_1 * diagonal_2


def area_reg_polygon(sides: int, length: float) -> float:
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            "area_reg_polygon() only accepts integers greater than or equal to three as number of sides"
        )
    elif length < 0:
        raise ValueError("area_reg_polygon() only accepts non-negative values as length of a side")
    return (sides * length**2) / (4 * tan(pi / sides))


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)  # verbose so we can see methods missing tests

    print("[DEMO] Areas of various geometric shapes: \n")
    print(f"Rectangle: {area_rectangle(10, 20) = }")
    print(f"Square: {area_square(10) = }")
    print(f"Triangle: {area_triangle(10, 10) = }")
    print(f"Triangle: {area_triangle_three_sides(5, 12, 13) = }")
    print(f"Parallelogram: {area_parallelogram(10, 20) = }")
    print(f"Rhombus: {area_rhombus(10, 20) = }")
    print(f"Trapezium: {area_trapezium(10, 20, 30) = }")
    print(f"Circle: {area_circle(20) = }")
    print(f"Ellipse: {area_ellipse(10, 20) = }")
    print("\nSurface Areas of various geometric shapes: \n")
    print(f"Cube: {surface_area_cube(20) = }")
    print(f"Cuboid: {surface_area_cuboid(10, 20, 30) = }")
    print(f"Sphere: {surface_area_sphere(20) = }")
    print(f"Hemisphere: {surface_area_hemisphere(20) = }")
    print(f"Cone: {surface_area_cone(10, 20) = }")
    print(f"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }")
    print(f"Cylinder: {surface_area_cylinder(10, 20) = }")
    print(f"Torus: {surface_area_torus(20, 10) = }")
    print(f"Equilateral Triangle: {area_reg_polygon(3, 10) = }")
    print(f"Square: {area_reg_polygon(4, 10) = }")
    print(f"Regular Pentagon: {area_reg_polygon(5, 10) = }")
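
    # Added spot check (not in the original file): a regular hexagon of side 2
    # has area 6 * sqrt(3), which area_reg_polygon should reproduce.
    assert abs(area_reg_polygon(6, 2) - 6 * sqrt(3)) < 1e-9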
import unittest

import torch

from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism

from .test_modeling_common import ModelTesterMixin, UNetTesterMixin


enable_full_determinism()


class VQModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = VQModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)

        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 3,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass

    def test_from_pretrained_hub(self):
        model, loading_info = VQModel.from_pretrained("fusing/vqgan-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = VQModel.from_pretrained("fusing/vqgan-dummy")
        model.to(torch_device).eval()

        torch.manual_seed(0)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0)

        image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143])
        # fmt: on
        self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
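

def _demo_forward_pass():
    # Added sketch, not part of the test file: the deterministic forward pass
    # that test_output_pretrained above checks.
    model = VQModel.from_pretrained("fusing/vqgan-dummy").eval()
    torch.manual_seed(0)
    image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
    with torch.no_grad():
        return model(image).sample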
"""simple docstring"""
UpperCamelCase_ : List[str] = '''ABCDEFGHIJKLMNOPQRSTUVWXYZ'''
def A_ ():
'''simple docstring'''
A_ = input("Enter message: " )
A_ = input("Enter key [alphanumeric]: " )
A_ = input("Encrypt/Decrypt [e/d]: " )
if mode.lower().startswith("e" ):
A_ = "encrypt"
A_ = encrypt_message(UpperCAmelCase__ , UpperCAmelCase__ )
elif mode.lower().startswith("d" ):
A_ = "decrypt"
A_ = decrypt_message(UpperCAmelCase__ , UpperCAmelCase__ )
print(f'\n{mode.title()}ed message:' )
print(UpperCAmelCase__ )
def A_ (__a , __a ):
'''simple docstring'''
return translate_message(UpperCAmelCase__ , UpperCAmelCase__ , "encrypt" )
def A_ (__a , __a ):
'''simple docstring'''
return translate_message(UpperCAmelCase__ , UpperCAmelCase__ , "decrypt" )
def A_ (__a , __a , __a ):
'''simple docstring'''
A_ = []
A_ = 0
A_ = key.upper()
for symbol in message:
A_ = LETTERS.find(symbol.upper() )
if num != -1:
if mode == "encrypt":
num += LETTERS.find(key[key_index] )
elif mode == "decrypt":
num -= LETTERS.find(key[key_index] )
num %= len(UpperCAmelCase__ )
if symbol.isupper():
translated.append(LETTERS[num] )
elif symbol.islower():
translated.append(LETTERS[num].lower() )
key_index += 1
if key_index == len(UpperCAmelCase__ ):
A_ = 0
else:
translated.append(UpperCAmelCase__ )
return "".join(UpperCAmelCase__ )
if __name__ == "__main__":
main()
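
    # Round-trip sanity check (added illustration, not part of the original
    # file): decryption must invert encryption for any alphabetic key.
    ciphertext = encrypt_message("LIME", "Attack at dawn")
    assert decrypt_message("LIME", ciphertext) == "Attack at dawn"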
UNIVERSAL_GAS_CONSTANT = 8.3144598


def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    if temperature < 0:
        raise Exception("Temperature cannot be less than 0 K")
    if molar_mass <= 0:
        raise Exception("Molar mass cannot be less than or equal to 0 kg/mol")
    return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # example
    temperature = 300
    molar_mass = 0.028  # molar mass of N2 in kg/mol; the formula expects SI units
    vrms = rms_speed_of_molecule(temperature, molar_mass)
    print(f"Vrms of Nitrogen gas at 300 K is {vrms} m/s")
import json
import multiprocessing
import os
import re
from collections import defaultdict

import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm

import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList


EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]


class TokenizedDataset(IterableDataset):
    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generate commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors="pt")
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }


class EndOfFunctionCriteria(StoppingCriteria):
    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        """Returns True if all generated sequences contain any of the end-of-function strings."""
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)


def remove_last_block(string):
    """Remove the last block of code containing one of the EOF_STRINGS."""
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    # last string should be ""
    return "".join(string_list[:-2])


def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs
            )
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
            )

            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()

            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)

    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens


def main():
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()

    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"

    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()

    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)

    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)

    # Generation settings
    gen_kwargs = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }

    # Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval")
    code_eval_metric = load_metric("code_eval")

    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"])
    n_copies = args.n_samples // args.batch_size

    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["test"], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)

    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""], predictions=[[""]])
    except ValueError as exception:
        print(
            'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
            " flag to enable code evaluation."
        )
        raise exception

    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)

    generations = complete_code(
        accelerator,
        model,
        tokenizer,
        human_eval_loader,
        n_tasks=n_tasks,
        batch_size=args.batch_size,
        **gen_kwargs,
    )

    if accelerator.is_main_process:
        references = []

        for task in tqdm(range(n_tasks)):
            test_func = human_eval["test"][task]["test"]
            entry_point = f"check({human_eval['test'][task]['entry_point']})"
            references.append("\n" + test_func + "\n" + entry_point)

        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=generations, num_workers=args.num_workers
        )
        print(f"Results: {pass_at_k}")

        # Save results to json file
        with open(args.output_file, "w") as fp:
            json.dump(pass_at_k, fp)


# For some reason the following seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
    main()
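

def _demo_code_eval():
    # Added illustration, not part of the original script: calling the
    # `code_eval` metric on its own. Requires HF_ALLOW_CODE_EVAL="1" in the
    # environment, as the script configures above.
    metric = load_metric("code_eval")
    pass_at_k, _ = metric.compute(
        references=["assert add(2, 2) == 4"],
        predictions=[["def add(a, b):\n    return a + b"]],
        k=[1],
    )
    return pass_at_k  # e.g. {"pass@1": 1.0}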
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_informer": [
        "INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_informer"] = [
        "INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InformerForPrediction",
        "InformerModel",
        "InformerPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_informer import (
            INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            InformerForPrediction,
            InformerModel,
            InformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
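
# Usage note (added, not part of the file): with the lazy structure above,
# `from transformers import InformerConfig` stays cheap at import time; the
# torch-backed modeling module is only loaded when one of its classes is first
# accessed, e.g.:
#
#     from transformers import InformerConfig, InformerForPrediction
#     config = InformerConfig(prediction_length=24)
#     model = InformerForPrediction(config)  # triggers the torch import path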
import json
import os
from typing import Optional, Tuple

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}


class MgpstrTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        return (vocab_file,)
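
# Usage sketch (added, not part of the original file): the tokenizer is
# character-level, so each character of the input maps to one vocabulary id:
#
#     from transformers import MgpstrTokenizer
#     tokenizer = MgpstrTokenizer.from_pretrained("alibaba-damo/mgp-str-base")
#     tokenizer("ticket")["input_ids"]  # one id per character of "ticket"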
from __future__ import annotations

import inspect
import unittest

import numpy as np

from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFResNetForImageClassification, TFResNetModel
    from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class TFResNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ResNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_tf
class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ResNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ResNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Optional[int] = ["basic", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
UpperCAmelCase : str = layer_type
UpperCAmelCase : Optional[Any] = True
check_hidden_states_output(snake_case , snake_case , snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase : str = True
check_hidden_states_output(snake_case , snake_case , snake_case )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case )
@slow
def A_ ( self ):
'''simple docstring'''
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase : Any = TFResNetModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
def prepare_img( ):
    '''simple docstring'''
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_tf
@require_vision
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def A_ ( self ):
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Tuple = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
UpperCAmelCase : Union[str, Any] = self.default_image_processor
UpperCAmelCase : Tuple = prepare_img()
UpperCAmelCase : str = image_processor(images=snake_case , return_tensors="tf" )
# forward pass
UpperCAmelCase : Any = model(**snake_case )
# verify the logits
UpperCAmelCase : Any = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , snake_case )
UpperCAmelCase : List[str] = tf.constant([-11.1069, -9.7877, -8.3777] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , snake_case , atol=1e-4 ) )
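# Hedged usage sketch mirroring the integration test above: classify one image with
# a pretrained TF ResNet checkpoint. "microsoft/resnet-50" is assumed to be the first
# entry of TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST; downloading weights needs network access.
import tensorflow as tf
from PIL import Image
from transformers import AutoImageProcessor, TFResNetForImageClassification

processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
model = TFResNetForImageClassification.from_pretrained("microsoft/resnet-50")
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="tf")
logits = model(**inputs).logits  # shape (1, 1000)
print(int(tf.math.argmax(logits, axis=-1)[0]))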
| 679
| 1
|
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
lowerCamelCase : str = (
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
lowerCamelCase : list[int] = [ord(letter) for letter in string.ascii_lowercase]
lowerCamelCase : set[int] = {ord(char) for char in VALID_CHARS}
lowerCamelCase : list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> str | None:
snake_case : str = ""
snake_case : int
snake_case : int
snake_case : int
for keychar, cipherchar in zip(cycle(lowercase ) ,lowercase ):
snake_case : Union[str, Any] = cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
decoded += chr(lowercase )
return decoded
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> list[str]:
snake_case : list[str] = []
for key in product(lowercase ,repeat=3 ):
snake_case : Dict = try_key(lowercase ,lowercase )
if encoded is not None:
possibles.append(lowercase )
return possibles
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> list[str]:
return [possible for possible in possibles if common_word in possible.lower()]
def SCREAMING_SNAKE_CASE__ ( lowercase = "p059_cipher.txt" ) -> int:
snake_case : list[int]
snake_case : list[str]
snake_case : str
snake_case : str
snake_case : str = Path(lowercase ).parent.joinpath(lowercase ).read_text(encoding="""utf-8""" )
snake_case : List[Any] = [int(lowercase ) for number in data.strip().split(""",""" )]
snake_case : Optional[int] = filter_valid_chars(lowercase )
for common_word in COMMON_WORDS:
snake_case : str = filter_common_word(lowercase ,lowercase )
if len(lowercase ) == 1:
break
snake_case : str = possibles[0]
return sum(ord(lowercase ) for char in decoded_text )
if __name__ == "__main__":
print(f"""{solution() = }""")
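# Quick round-trip check of the XOR scheme solved above, with a hypothetical
# plaintext and key (shown inline so it runs standalone): XOR-ing the ciphertext
# with the same repeating key recovers the original text.
from itertools import cycle

plaintext = "the quick brown fox"
key = "abc"
cipher = [ord(p) ^ ord(k) for p, k in zip(plaintext, cycle(key))]
decoded = "".join(chr(c ^ ord(k)) for c, k in zip(cipher, cycle(key)))
assert decoded == plaintext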
| 684
|
import inspect
import re
from hashlib import shaaaa
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> str:
snake_case : int = []
for line in lines:
snake_case : Dict = re.sub(R"""#.*""" ,"""""" ,lowercase ) # remove comments
if line:
filtered_lines.append(lowercase )
snake_case : Optional[int] = """\n""".join(lowercase )
# Make a hash from all this code
snake_case : List[str] = full_str.encode("""utf-8""" )
return shaaaa(lowercase ).hexdigest()
# get importable module names and hash for caching
lowerCamelCase : Any = {
'csv': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
'json': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
'pandas': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
'parquet': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
'arrow': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
'text': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
'imagefolder': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
'audiofolder': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
lowerCamelCase : Optional[int] = {
'.csv': ('csv', {}),
'.tsv': ('csv', {'sep': '\t'}),
'.json': ('json', {}),
'.jsonl': ('json', {}),
'.parquet': ('parquet', {}),
'.arrow': ('arrow', {}),
'.txt': ('text', {}),
}
_EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
lowerCamelCase : Tuple = {'imagefolder', 'audiofolder'}
# Used to filter data files based on extensions given a module name
lowerCamelCase : Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append('.zip')
_MODULE_TO_EXTENSIONS["audiofolder"].append('.zip')
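# Hedged sketch of how a loader might consume the mapping above: pick the builder
# module for a data file from its extension, e.g. routing ".tsv" to the csv module
# with a tab separator. infer_module_for_file is an illustrative helper, not library API.
import os

def infer_module_for_file(path, extension_to_module):
    ext = os.path.splitext(path)[1].lower()
    module_name, default_kwargs = extension_to_module.get(ext, (None, {}))
    return module_name, dict(default_kwargs)

example_map = {".csv": ("csv", {}), ".tsv": ("csv", {"sep": "\t"})}
assert infer_module_for_file("data/train.TSV", example_map) == ("csv", {"sep": "\t"})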
| 684
| 1
|
"""simple docstring"""
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('''At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training''')
# TF training parameters
__lowerCAmelCase : Any = False
__lowerCAmelCase : Tuple = False
def __lowerCAmelCase ( __UpperCamelCase : Namespace ):
'''simple docstring'''
return TrainCommand(__UpperCamelCase )
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
@staticmethod
def UpperCAmelCase__ ( _lowercase ) -> Any:
'''simple docstring'''
snake_case_ : str = parser.add_parser("""train""" , help="""CLI tool to train a model on a task.""" )
train_parser.add_argument(
"""--train_data""" , type=_lowercase , required=_lowercase , help="""path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.""" , )
train_parser.add_argument(
"""--column_label""" , type=_lowercase , default=0 , help="""Column of the dataset csv file with example labels.""" )
train_parser.add_argument(
"""--column_text""" , type=_lowercase , default=1 , help="""Column of the dataset csv file with example texts.""" )
train_parser.add_argument(
"""--column_id""" , type=_lowercase , default=2 , help="""Column of the dataset csv file with example ids.""" )
train_parser.add_argument(
"""--skip_first_row""" , action="""store_true""" , help="""Skip the first row of the csv file (headers).""" )
train_parser.add_argument("""--validation_data""" , type=_lowercase , default="""""" , help="""path to validation dataset.""" )
train_parser.add_argument(
"""--validation_split""" , type=_lowercase , default=0.1 , help="""if validation dataset is not provided, fraction of train dataset to use as validation dataset.""" , )
train_parser.add_argument("""--output""" , type=_lowercase , default="""./""" , help="""path to save the trained model.""" )
train_parser.add_argument(
"""--task""" , type=_lowercase , default="""text_classification""" , help="""Task to train the model on.""" )
train_parser.add_argument(
"""--model""" , type=_lowercase , default="""bert-base-uncased""" , help="""Model's name or path to stored model.""" )
train_parser.add_argument("""--train_batch_size""" , type=_lowercase , default=3_2 , help="""Batch size for training.""" )
train_parser.add_argument("""--valid_batch_size""" , type=_lowercase , default=6_4 , help="""Batch size for validation.""" )
train_parser.add_argument("""--learning_rate""" , type=_lowercase , default=3E-5 , help="""Learning rate.""" )
train_parser.add_argument("""--adam_epsilon""" , type=_lowercase , default=1E-08 , help="""Epsilon for Adam optimizer.""" )
train_parser.set_defaults(func=_lowercase )
def __init__( self , _lowercase ) -> Dict:
'''simple docstring'''
snake_case_ : Optional[int] = logging.get_logger("""transformers-cli/training""" )
snake_case_ : Dict = """tf""" if is_tf_available() else """torch"""
os.makedirs(args.output , exist_ok=_lowercase )
snake_case_ : List[Any] = args.output
snake_case_ : Optional[int] = args.column_label
snake_case_ : List[str] = args.column_text
snake_case_ : int = args.column_id
self.logger.info(f'Loading {args.task} pipeline for {args.model}' )
if args.task == "text_classification":
snake_case_ : Any = TextClassificationPipeline.from_pretrained(args.model )
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(f'Loading dataset from {args.train_data}' )
snake_case_ : Tuple = Processor.create_from_csv(
args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
snake_case_ : Tuple = None
if args.validation_data:
self.logger.info(f'Loading validation dataset from {args.validation_data}' )
snake_case_ : List[Any] = Processor.create_from_csv(
args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
snake_case_ : Dict = args.validation_split
snake_case_ : Tuple = args.train_batch_size
snake_case_ : List[Any] = args.valid_batch_size
snake_case_ : List[Any] = args.learning_rate
snake_case_ : str = args.adam_epsilon
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
raise NotImplementedError
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
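# Minimal sketch of the CLI registration pattern used above: a subcommand declares
# its own flags and binds a factory through set_defaults(func=...). All names below
# are illustrative, not the actual transformers CLI.
from argparse import ArgumentParser

def train_factory(args):
    return f"training {args.model} on {args.train_data}"

parser = ArgumentParser("demo-cli")
subparsers = parser.add_subparsers()
train_parser = subparsers.add_parser("train", help="CLI tool to train a model on a task.")
train_parser.add_argument("--train_data", type=str, required=True)
train_parser.add_argument("--model", type=str, default="bert-base-uncased")
train_parser.set_defaults(func=train_factory)

args = parser.parse_args(["train", "--train_data", "data.csv"])
print(args.func(args))  # -> training bert-base-uncased on data.csv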
| 58
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowercase__ ( A_ ,A_ ,unittest.TestCase ):
__UpperCAmelCase = StableDiffusionSAGPipeline
__UpperCAmelCase = TEXT_TO_IMAGE_PARAMS
__UpperCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS
__UpperCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
__UpperCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
__UpperCAmelCase = False
def UpperCamelCase_ ( self) -> Optional[Any]:
torch.manual_seed(0)
_lowerCamelCase : Dict = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
_lowerCamelCase : int = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , clip_sample=SCREAMING_SNAKE_CASE , set_alpha_to_one=SCREAMING_SNAKE_CASE , )
torch.manual_seed(0)
_lowerCamelCase : Tuple = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0)
_lowerCamelCase : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
_lowerCamelCase : List[Any] = CLIPTextModel(SCREAMING_SNAKE_CASE)
_lowerCamelCase : Tuple = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""")
_lowerCamelCase : List[Any] = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=0) -> List[Any]:
if str(SCREAMING_SNAKE_CASE).startswith("""mps"""):
_lowerCamelCase : List[str] = torch.manual_seed(SCREAMING_SNAKE_CASE)
else:
_lowerCamelCase : List[str] = torch.Generator(device=SCREAMING_SNAKE_CASE).manual_seed(SCREAMING_SNAKE_CASE)
_lowerCamelCase : List[Any] = {
"""prompt""": """.""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 1.0,
"""sag_scale""": 1.0,
"""output_type""": """numpy""",
}
return inputs
def UpperCamelCase_ ( self) -> Tuple:
super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class lowercase__ ( unittest.TestCase ):
def UpperCamelCase_ ( self) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase_ ( self) -> Optional[Any]:
_lowerCamelCase : Any = StableDiffusionSAGPipeline.from_pretrained("""CompVis/stable-diffusion-v1-4""")
_lowerCamelCase : Union[str, Any] = sag_pipe.to(SCREAMING_SNAKE_CASE)
sag_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE)
_lowerCamelCase : Optional[int] = """."""
_lowerCamelCase : int = torch.manual_seed(0)
_lowerCamelCase : Tuple = sag_pipe(
[prompt] , generator=SCREAMING_SNAKE_CASE , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""")
_lowerCamelCase : Dict = output.images
_lowerCamelCase : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_lowerCamelCase : Optional[Any] = np.array([0.15_68, 0.17_38, 0.16_95, 0.16_93, 0.15_07, 0.17_05, 0.15_47, 0.17_51, 0.19_49])
assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2
def UpperCamelCase_ ( self) -> List[str]:
_lowerCamelCase : Optional[Any] = StableDiffusionSAGPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""")
_lowerCamelCase : Dict = sag_pipe.to(SCREAMING_SNAKE_CASE)
sag_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE)
_lowerCamelCase : Union[str, Any] = """."""
_lowerCamelCase : List[str] = torch.manual_seed(0)
_lowerCamelCase : int = sag_pipe(
[prompt] , generator=SCREAMING_SNAKE_CASE , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""")
_lowerCamelCase : Any = output.images
_lowerCamelCase : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_lowerCamelCase : Any = np.array([0.34_59, 0.28_76, 0.25_37, 0.30_02, 0.26_71, 0.21_60, 0.30_26, 0.22_62, 0.23_71])
assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2
def UpperCamelCase_ ( self) -> List[str]:
_lowerCamelCase : int = StableDiffusionSAGPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""")
_lowerCamelCase : Optional[Any] = sag_pipe.to(SCREAMING_SNAKE_CASE)
sag_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE)
_lowerCamelCase : Dict = """."""
_lowerCamelCase : Union[str, Any] = torch.manual_seed(0)
_lowerCamelCase : Optional[int] = sag_pipe(
[prompt] , width=768 , height=512 , generator=SCREAMING_SNAKE_CASE , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" , )
_lowerCamelCase : Union[str, Any] = output.images
assert image.shape == (1, 512, 768, 3)
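# Hedged usage sketch matching the slow tests above: run self-attention guidance with
# a non-zero sag_scale. Needs a GPU and downloaded weights; the checkpoint id comes
# from the tests, the prompt is illustrative.
import torch
from diffusers import StableDiffusionSAGPipeline

pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
pipe = pipe.to("cuda")
generator = torch.manual_seed(0)
image = pipe(
    "a photo of an astronaut", generator=generator, guidance_scale=7.5,
    sag_scale=1.0, num_inference_steps=20, output_type="np",
).images[0]
print(image.shape)  # (512, 512, 3)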
| 88
| 0
|
from maths.prime_check import is_prime
def twin_prime( number : int ):
    if not isinstance(number , int ):
        msg = f'''Input value of [number={number}] must be an integer'''
        raise TypeError(msg )
    if is_prime(number ) and is_prime(number + 2 ):
        return number + 2
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
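# Self-contained restatement of the check above without the maths.prime_check import
# (the trial-division helper below is illustrative): returns number + 2 when
# (number, number + 2) is a twin prime pair, else -1.
def _is_prime(n: int) -> bool:
    if n < 2:
        return False
    return all(n % d for d in range(2, int(n**0.5) + 1))

def twin_prime_demo(number: int) -> int:
    return number + 2 if _is_prime(number) and _is_prime(number + 2) else -1

assert twin_prime_demo(5) == 7   # (5, 7) are twin primes
assert twin_prime_demo(4) == -1  # 4 is not prime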
| 714
|
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class UpperCAmelCase_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self , UpperCAmelCase_ , UpperCAmelCase_=13 , UpperCAmelCase_=7 , UpperCAmelCase_=True , UpperCAmelCase_=True , UpperCAmelCase_=False , UpperCAmelCase_=True , UpperCAmelCase_=99 , UpperCAmelCase_=32 , UpperCAmelCase_=5 , UpperCAmelCase_=4 , UpperCAmelCase_=37 , UpperCAmelCase_="gelu" , UpperCAmelCase_=0.1 , UpperCAmelCase_=0.1 , UpperCAmelCase_=5_12 , UpperCAmelCase_=16 , UpperCAmelCase_=2 , UpperCAmelCase_=0.02 , UpperCAmelCase_=3 , UpperCAmelCase_=4 , UpperCAmelCase_=None , ):
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = seq_length
snake_case_ = is_training
snake_case_ = use_input_mask
snake_case_ = use_token_type_ids
snake_case_ = use_labels
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = max_position_embeddings
snake_case_ = type_vocab_size
snake_case_ = type_sequence_label_size
snake_case_ = initializer_range
snake_case_ = num_labels
snake_case_ = num_choices
snake_case_ = scope
def _lowercase ( self ):
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ = None
if self.use_input_mask:
snake_case_ = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ = None
snake_case_ = None
snake_case_ = None
if self.use_labels:
snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case_ = ids_tensor([self.batch_size] , self.num_choices )
snake_case_ = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowercase ( self ):
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def _lowercase ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
snake_case_ = DistilBertModel(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
snake_case_ = model(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = model(UpperCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
snake_case_ = DistilBertForMaskedLM(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
snake_case_ = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , labels=UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
snake_case_ = DistilBertForQuestionAnswering(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
snake_case_ = model(
UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowercase ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
snake_case_ = self.num_labels
snake_case_ = DistilBertForSequenceClassification(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
snake_case_ = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , labels=UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowercase ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
snake_case_ = self.num_labels
snake_case_ = DistilBertForTokenClassification(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
snake_case_ = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , labels=UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowercase ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
snake_case_ = self.num_choices
snake_case_ = DistilBertForMultipleChoice(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
snake_case_ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case_ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case_ = model(
UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , labels=UpperCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowercase ( self ):
snake_case_ = self.prepare_config_and_inputs()
((snake_case_) , (snake_case_) , (snake_case_) , (snake_case_) , (snake_case_) , (snake_case_)) = config_and_inputs
snake_case_ = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
snake_case = (
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
snake_case = (
{
"""feature-extraction""": DistilBertModel,
"""fill-mask""": DistilBertForMaskedLM,
"""question-answering""": DistilBertForQuestionAnswering,
"""text-classification""": DistilBertForSequenceClassification,
"""token-classification""": DistilBertForTokenClassification,
"""zero-shot""": DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case = True
snake_case = True
snake_case = True
snake_case = True
def _lowercase ( self ):
snake_case_ = DistilBertModelTester(self )
snake_case_ = ConfigTester(self , config_class=UpperCAmelCase_ , dim=37 )
def _lowercase ( self ):
self.config_tester.run_common_tests()
def _lowercase ( self ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*UpperCAmelCase_ )
def _lowercase ( self ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*UpperCAmelCase_ )
def _lowercase ( self ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*UpperCAmelCase_ )
def _lowercase ( self ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*UpperCAmelCase_ )
def _lowercase ( self ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*UpperCAmelCase_ )
def _lowercase ( self ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*UpperCAmelCase_ )
@slow
def _lowercase ( self ):
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = DistilBertModel.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
@slow
@require_torch_gpu
def _lowercase ( self ):
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# DistilBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
snake_case_ = True
snake_case_ = model_class(config=UpperCAmelCase_ )
snake_case_ = self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = torch.jit.trace(
UpperCAmelCase_ , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(UpperCAmelCase_ , os.path.join(UpperCAmelCase_ , "traced_model.pt" ) )
snake_case_ = torch.jit.load(os.path.join(UpperCAmelCase_ , "traced_model.pt" ) , map_location=UpperCAmelCase_ )
loaded(inputs_dict["input_ids"].to(UpperCAmelCase_ ) , inputs_dict["attention_mask"].to(UpperCAmelCase_ ) )
@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def _lowercase ( self ):
snake_case_ = DistilBertModel.from_pretrained("distilbert-base-uncased" )
snake_case_ = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
snake_case_ = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
snake_case_ = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ )[0]
snake_case_ = torch.Size((1, 11, 7_68) )
self.assertEqual(output.shape , UpperCAmelCase_ )
snake_case_ = torch.tensor(
[[[-0.1_639, 0.3_299, 0.1_648], [-0.1_746, 0.3_289, 0.1_710], [-0.1_884, 0.3_357, 0.1_810]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , UpperCAmelCase_ , atol=1e-4 ) )
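# Hedged usage sketch of the checkpoint exercised in the integration test above:
# encode a sentence with the matching tokenizer and inspect the last hidden state.
# Downloading "distilbert-base-uncased" requires network access.
import torch
from transformers import DistilBertModel, DistilBertTokenizer

tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")
model = DistilBertModel.from_pretrained("distilbert-base-uncased")
inputs = tokenizer("Hello, world!", return_tensors="pt")
with torch.no_grad():
    hidden = model(**inputs).last_hidden_state
print(hidden.shape)  # torch.Size([1, sequence_length, 768])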
| 420
| 0
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class __magic_name__ (unittest.TestCase ):
'''simple docstring'''
def __init__( self:Union[str, Any] , _a:Optional[int] , _a:Tuple=7 , _a:Dict=3 , _a:Optional[Any]=18 , _a:Optional[Any]=30 , _a:Union[str, Any]=4_00 , _a:str=True , _a:Optional[Any]=32 , _a:Tuple=True , ):
snake_case__ = parent
snake_case__ = batch_size
snake_case__ = num_channels
snake_case__ = image_size
snake_case__ = min_resolution
snake_case__ = max_resolution
snake_case__ = do_resize
snake_case__ = size_divisor
snake_case__ = do_rescale
def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
return {
"do_resize": self.do_resize,
"size_divisor": self.size_divisor,
"do_rescale": self.do_rescale,
}
@require_torch
@require_vision
class __magic_name__ (snake_case_ ,unittest.TestCase ):
'''simple docstring'''
__lowercase : Optional[Any] = GLPNImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE__ ( self:Any ):
snake_case__ = GLPNImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
snake_case__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_a , '''do_resize''' ) )
self.assertTrue(hasattr(_a , '''size_divisor''' ) )
self.assertTrue(hasattr(_a , '''resample''' ) )
self.assertTrue(hasattr(_a , '''do_rescale''' ) )
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
pass
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
# Initialize image_processing
snake_case__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a )
for image in image_inputs:
self.assertIsInstance(_a , Image.Image )
# Test not batched input (GLPNImageProcessor doesn't support batching)
snake_case__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
# Initialize image_processing
snake_case__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , numpify=_a )
for image in image_inputs:
self.assertIsInstance(_a , np.ndarray )
# Test not batched input (GLPNImageProcessor doesn't support batching)
snake_case__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
# Initialize image_processing
snake_case__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , torchify=_a )
for image in image_inputs:
self.assertIsInstance(_a , torch.Tensor )
# Test not batched input (GLPNImageProcessor doesn't support batching)
snake_case__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
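# The tests above only assert divisibility; this hedged sketch shows the kind of
# rounding a size_divisor-based processor applies, shrinking each side to the
# nearest lower multiple of size_divisor (function name is illustrative).
def round_down_to_multiple(height, width, size_divisor):
    return (height // size_divisor) * size_divisor, (width // size_divisor) * size_divisor

assert round_down_to_multiple(415, 530, 32) == (384, 512)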
| 33
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
__snake_case =logging.get_logger(__name__)
__snake_case ={"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
__snake_case =[
"""small""",
"""small-base""",
"""medium""",
"""medium-base""",
"""intermediate""",
"""intermediate-base""",
"""large""",
"""large-base""",
"""xlarge""",
"""xlarge-base""",
]
__snake_case ={
"""vocab_file""": {
"""funnel-transformer/small""": """https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt""",
"""funnel-transformer/small-base""": """https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt""",
"""funnel-transformer/medium""": """https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt""",
"""funnel-transformer/medium-base""": (
"""https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt"""
),
"""funnel-transformer/intermediate""": (
"""https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt"""
),
"""funnel-transformer/intermediate-base""": (
"""https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt"""
),
"""funnel-transformer/large""": """https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt""",
"""funnel-transformer/large-base""": """https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt""",
"""funnel-transformer/xlarge""": """https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt""",
"""funnel-transformer/xlarge-base""": (
"""https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""funnel-transformer/small""": """https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json""",
"""funnel-transformer/small-base""": (
"""https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json"""
),
"""funnel-transformer/medium""": """https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json""",
"""funnel-transformer/medium-base""": (
"""https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json"""
),
"""funnel-transformer/intermediate""": (
"""https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json"""
),
"""funnel-transformer/intermediate-base""": (
"""https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json"""
),
"""funnel-transformer/large""": """https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json""",
"""funnel-transformer/large-base""": (
"""https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json"""
),
"""funnel-transformer/xlarge""": """https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json""",
"""funnel-transformer/xlarge-base""": (
"""https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json"""
),
},
}
__snake_case ={F'''funnel-transformer/{name}''': 512 for name in _model_names}
__snake_case ={F'''funnel-transformer/{name}''': {"""do_lower_case""": True} for name in _model_names}
class UpperCAmelCase_ ( __lowercase ):
lowerCamelCase : str = VOCAB_FILES_NAMES
lowerCamelCase : List[Any] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : Dict = PRETRAINED_INIT_CONFIGURATION
lowerCamelCase : Dict = FunnelTokenizer
lowerCamelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : int = 2
def __init__( self : str , UpperCAmelCase__ : List[str]=None , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : int=True , UpperCAmelCase__ : int="<unk>" , UpperCAmelCase__ : Any="<sep>" , UpperCAmelCase__ : int="<pad>" , UpperCAmelCase__ : Tuple="<cls>" , UpperCAmelCase__ : List[Any]="<mask>" , UpperCAmelCase__ : Optional[Any]="<s>" , UpperCAmelCase__ : Tuple="</s>" , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : Union[str, Any]="##" , **UpperCAmelCase__ : Tuple , ) -> Dict:
super().__init__(
UpperCAmelCase__ , tokenizer_file=UpperCAmelCase__ , do_lower_case=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , sep_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , cls_token=UpperCAmelCase__ , mask_token=UpperCAmelCase__ , bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , clean_text=UpperCAmelCase__ , tokenize_chinese_chars=UpperCAmelCase__ , strip_accents=UpperCAmelCase__ , wordpieces_prefix=UpperCAmelCase__ , **UpperCAmelCase__ , )
lowerCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , UpperCAmelCase__ ) != do_lower_case
or normalizer_state.get('strip_accents' , UpperCAmelCase__ ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , UpperCAmelCase__ ) != tokenize_chinese_chars
):
lowerCAmelCase = getattr(UpperCAmelCase__ , normalizer_state.pop('type' ) )
lowerCAmelCase = do_lower_case
lowerCAmelCase = strip_accents
lowerCAmelCase = tokenize_chinese_chars
lowerCAmelCase = normalizer_class(**UpperCAmelCase__ )
lowerCAmelCase = do_lower_case
def __UpperCAmelCase ( self : str , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ) -> List[int]:
    output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
    if token_ids_b:
        output += token_ids_b + [self.sep_token_id]
    return output
def __UpperCAmelCase ( self : Union[str, Any] , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ) -> List[int]:
    sep = [self.sep_token_id]
    cls = [self.cls_token_id]
    if token_ids_b is None:
        return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0]
    return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
def __UpperCAmelCase ( self : Any , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None ) -> Tuple[str]:
lowerCAmelCase = self._tokenizer.model.save(UpperCAmelCase__ , name=UpperCAmelCase__ )
return tuple(UpperCAmelCase__ )
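# Hedged pure-Python illustration of the token_type_ids scheme implemented above:
# the class token gets its own segment id (2 for Funnel), the first sequence plus its
# separator gets 0s, an optional second sequence plus separator gets 1s. Token ids
# below are made up for the demo.
def funnel_token_type_ids(ids_a, ids_b=None, cls_token_type_id=2):
    sep, cls = [102], [101]  # hypothetical sep/cls ids
    if ids_b is None:
        return len(cls) * [cls_token_type_id] + len(ids_a + sep) * [0]
    return len(cls) * [cls_token_type_id] + len(ids_a + sep) * [0] + len(ids_b + sep) * [1]

assert funnel_token_type_ids([7, 8]) == [2, 0, 0, 0]
assert funnel_token_type_ids([7, 8], [9]) == [2, 0, 0, 0, 1, 1]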
| 133
| 0
|
import numpy
# List of input, output pairs
UpperCamelCase__ =(
((5, 2, 3), 15),
((6, 5, 9), 25),
((11, 12, 13), 41),
((1, 1, 1), 8),
((11, 12, 13), 41),
)
UpperCamelCase__ =(((515, 22, 13), 555), ((61, 35, 49), 150))
UpperCamelCase__ =[2, 4, 1, 5]
UpperCamelCase__ =len(train_data)
UpperCamelCase__ =0.009
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase="train" ):
return calculate_hypothesis_value(__lowerCamelCase, __lowerCamelCase ) - output(
__lowerCamelCase, __lowerCamelCase )
def lowerCamelCase__ (__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Dict = 0
for i in range(len(__lowerCamelCase ) - 1 ):
hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
hyp_val += parameter_vector[0]
return hyp_val
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase ):
if data_set == "train":
return train_data[example_no][1]
elif data_set == "test":
return test_data[example_no][1]
return None
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase ):
if data_set == "train":
return _hypothesis_value(train_data[example_no][0] )
elif data_set == "test":
return _hypothesis_value(test_data[example_no][0] )
return None
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase=m ):
_SCREAMING_SNAKE_CASE : List[str] = 0
for i in range(__lowerCamelCase ):
if index == -1:
summation_value += _error(__lowerCamelCase )
else:
summation_value += _error(__lowerCamelCase ) * train_data[i][0][index]
return summation_value
def lowerCamelCase__ (__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : List[str] = summation_of_cost_derivative(__lowerCamelCase, __lowerCamelCase ) / m
return cost_derivative_value
def lowerCamelCase__ ():
global parameter_vector
# Tune these values to set a tolerance value for predicted output
_SCREAMING_SNAKE_CASE : Dict = 0.00_0002
_SCREAMING_SNAKE_CASE : List[Any] = 0
_SCREAMING_SNAKE_CASE : Union[str, Any] = 0
while True:
j += 1
_SCREAMING_SNAKE_CASE : List[str] = [0, 0, 0, 0]
for i in range(0, len(__lowerCamelCase ) ):
_SCREAMING_SNAKE_CASE : int = get_cost_derivative(i - 1 )
_SCREAMING_SNAKE_CASE : Optional[int] = (
parameter_vector[i] - LEARNING_RATE * cost_derivative
)
if numpy.allclose(
__lowerCamelCase, __lowerCamelCase, atol=__lowerCamelCase, rtol=__lowerCamelCase, ):
break
_SCREAMING_SNAKE_CASE : Optional[int] = temp_parameter_vector
print(("Number of iterations:", j) )
def lowerCamelCase__ ():
for i in range(len(__lowerCamelCase ) ):
print(("Actual output value:", output(__lowerCamelCase, "test" )) )
print(("Hypothesis output:", calculate_hypothesis_value(__lowerCamelCase, "test" )) )
if __name__ == "__main__":
run_gradient_descent()
print('\nTesting gradient descent for a linear hypothesis function.\n')
test_gradient_descent()
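# A compact NumPy restatement of the loop above (hedged sketch, not the module's API):
# batch gradient descent for h(x) = theta[0] + theta[1:] . x on the same training data.
import numpy as np

X = np.array([[5, 2, 3], [6, 5, 9], [11, 12, 13], [1, 1, 1], [11, 12, 13]], dtype=float)
y = np.array([15, 25, 41, 8, 41], dtype=float)
Xb = np.hstack([np.ones((len(X), 1)), X])  # prepend a bias column
theta = np.zeros(Xb.shape[1])
for _ in range(500_000):
    grad = Xb.T @ (Xb @ theta - y) / len(y)  # gradient of the mean squared error / 2
    theta -= 0.001 * grad
print(np.round(theta, 2))  # should approach [5. 1. 1. 1.], the exact fit of this data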
| 381
|
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase__ =logging.get_logger(__name__)
set_seed(770)
UpperCamelCase__ ={
'c_attn': 'att_proj',
'c_proj': 'out_proj',
'c_fc': 'in_proj',
'transformer.': '',
'h.': 'layers.',
'ln_1': 'layernorm_1',
'ln_2': 'layernorm_2',
'ln_f': 'layernorm_final',
'wpe': 'position_embeds_layer',
'wte': 'input_embeds_layer',
}
UpperCamelCase__ ={
'text_small': {
'repo_id': 'suno/bark',
'file_name': 'text.pt',
},
'coarse_small': {
'repo_id': 'suno/bark',
'file_name': 'coarse.pt',
},
'fine_small': {
'repo_id': 'suno/bark',
'file_name': 'fine.pt',
},
'text': {
'repo_id': 'suno/bark',
'file_name': 'text_2.pt',
},
'coarse': {
'repo_id': 'suno/bark',
'file_name': 'coarse_2.pt',
},
'fine': {
'repo_id': 'suno/bark',
'file_name': 'fine_2.pt',
},
}
UpperCamelCase__ =os.path.dirname(os.path.abspath(__file__))
UpperCamelCase__ =os.path.join(os.path.expanduser('~'), '.cache')
UpperCamelCase__ =os.path.join(os.getenv('XDG_CACHE_HOME', default_cache_dir), 'suno', 'bark_v0')
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase=False ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = model_type
if use_small:
key += "_small"
return os.path.join(__lowerCamelCase, REMOTE_MODEL_PATHS[key]["file_name"] )
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase ):
os.makedirs(__lowerCamelCase, exist_ok=__lowerCamelCase )
hf_hub_download(repo_id=__lowerCamelCase, filename=__lowerCamelCase, local_dir=__lowerCamelCase )
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase, __lowerCamelCase=False, __lowerCamelCase="text" ):
if model_type == "text":
_SCREAMING_SNAKE_CASE : List[Any] = BarkSemanticModel
_SCREAMING_SNAKE_CASE : Any = BarkSemanticConfig
_SCREAMING_SNAKE_CASE : Union[str, Any] = BarkSemanticGenerationConfig
elif model_type == "coarse":
_SCREAMING_SNAKE_CASE : List[str] = BarkCoarseModel
_SCREAMING_SNAKE_CASE : Any = BarkCoarseConfig
_SCREAMING_SNAKE_CASE : str = BarkCoarseGenerationConfig
elif model_type == "fine":
_SCREAMING_SNAKE_CASE : Optional[int] = BarkFineModel
_SCREAMING_SNAKE_CASE : List[str] = BarkFineConfig
_SCREAMING_SNAKE_CASE : Optional[int] = BarkFineGenerationConfig
else:
raise NotImplementedError()
_SCREAMING_SNAKE_CASE : List[str] = f"""{model_type}_small""" if use_small else model_type
_SCREAMING_SNAKE_CASE : Optional[Any] = REMOTE_MODEL_PATHS[model_key]
if not os.path.exists(__lowerCamelCase ):
logger.info(f"""{model_type} model not found, downloading into `{CACHE_DIR}`.""" )
_download(model_info["repo_id"], model_info["file_name"] )
_SCREAMING_SNAKE_CASE : str = torch.load(__lowerCamelCase, map_location=__lowerCamelCase )
# this is a hack
_SCREAMING_SNAKE_CASE : Any = checkpoint["model_args"]
if "input_vocab_size" not in model_args:
_SCREAMING_SNAKE_CASE : Optional[int] = model_args["vocab_size"]
_SCREAMING_SNAKE_CASE : Union[str, Any] = model_args["vocab_size"]
del model_args["vocab_size"]
# convert Bark model arguments to HF Bark model arguments
_SCREAMING_SNAKE_CASE : List[Any] = model_args.pop("n_head" )
_SCREAMING_SNAKE_CASE : Dict = model_args.pop("n_embd" )
_SCREAMING_SNAKE_CASE : Tuple = model_args.pop("n_layer" )
_SCREAMING_SNAKE_CASE : Tuple = ConfigClass(**checkpoint["model_args"] )
_SCREAMING_SNAKE_CASE : int = ModelClass(config=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Tuple = GenerationConfigClass()
_SCREAMING_SNAKE_CASE : Optional[int] = model_generation_config
_SCREAMING_SNAKE_CASE : Optional[Any] = checkpoint["model"]
# fixup checkpoint
_SCREAMING_SNAKE_CASE : Optional[Any] = "_orig_mod."
for k, v in list(state_dict.items() ):
if k.startswith(__lowerCamelCase ):
# replace part of the key with corresponding layer name in HF implementation
_SCREAMING_SNAKE_CASE : Optional[int] = k[len(__lowerCamelCase ) :]
for old_layer_name in new_layer_name_dict:
_SCREAMING_SNAKE_CASE : Tuple = new_k.replace(__lowerCamelCase, new_layer_name_dict[old_layer_name] )
_SCREAMING_SNAKE_CASE : Union[str, Any] = state_dict.pop(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Tuple = set(state_dict.keys() ) - set(model.state_dict().keys() )
_SCREAMING_SNAKE_CASE : int = {k for k in extra_keys if not k.endswith(".attn.bias" )}
_SCREAMING_SNAKE_CASE : Optional[int] = set(model.state_dict().keys() ) - set(state_dict.keys() )
_SCREAMING_SNAKE_CASE : List[str] = {k for k in missing_keys if not k.endswith(".attn.bias" )}
if len(__lowerCamelCase ) != 0:
raise ValueError(f"""extra keys found: {extra_keys}""" )
if len(__lowerCamelCase ) != 0:
raise ValueError(f"""missing keys: {missing_keys}""" )
model.load_state_dict(__lowerCamelCase, strict=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Tuple = model.num_parameters(exclude_embeddings=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = checkpoint["best_val_loss"].item()
logger.info(f"""model loaded: {round(n_params/1e6, 1 )}M params, {round(__lowerCamelCase, 3 )} loss""" )
model.eval()
model.to(__lowerCamelCase )
del checkpoint, state_dict
return model
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase=False, __lowerCamelCase="text" ):
if model_type not in ("text", "coarse", "fine"):
raise NotImplementedError()
_SCREAMING_SNAKE_CASE : Union[str, Any] = "cpu" # do conversion on cpu
_SCREAMING_SNAKE_CASE : Union[str, Any] = _get_ckpt_path(__lowerCamelCase, use_small=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = _load_model(__lowerCamelCase, __lowerCamelCase, model_type=__lowerCamelCase, use_small=__lowerCamelCase )
# load bark initial model
_SCREAMING_SNAKE_CASE : Union[str, Any] = _bark_load_model(__lowerCamelCase, "cpu", model_type=__lowerCamelCase, use_small=__lowerCamelCase )
if model_type == "text":
_SCREAMING_SNAKE_CASE : str = bark_model["model"]
if model.num_parameters(exclude_embeddings=__lowerCamelCase ) != bark_model.get_num_params():
raise ValueError("initial and new models don't have the same number of parameters" )
# check if same output as the bark model
_SCREAMING_SNAKE_CASE : Optional[Any] = 5
_SCREAMING_SNAKE_CASE : Optional[int] = 10
if model_type in ["text", "coarse"]:
_SCREAMING_SNAKE_CASE : Any = torch.randint(256, (batch_size, sequence_length), dtype=torch.int )
_SCREAMING_SNAKE_CASE : Optional[int] = bark_model(__lowerCamelCase )[0]
_SCREAMING_SNAKE_CASE : Any = model(__lowerCamelCase )
# take last logits
_SCREAMING_SNAKE_CASE : List[str] = output_new_model_total.logits[:, [-1], :]
else:
_SCREAMING_SNAKE_CASE : Tuple = 3
_SCREAMING_SNAKE_CASE : Union[str, Any] = 8
_SCREAMING_SNAKE_CASE : Optional[Any] = torch.randint(256, (batch_size, sequence_length, n_codes_total), dtype=torch.int )
_SCREAMING_SNAKE_CASE : List[Any] = model(__lowerCamelCase, __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = bark_model(__lowerCamelCase, __lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[Any] = output_new_model_total.logits
# any output difference should come from differences in the self-attention implementation design
if output_new_model.shape != output_old_model.shape:
raise ValueError("initial and new outputs don't have the same shape" )
if (output_new_model - output_old_model).abs().max().item() > 1e-3:
raise ValueError("initial and new outputs are not equal" )
Path(__lowerCamelCase ).mkdir(exist_ok=__lowerCamelCase )
model.save_pretrained(__lowerCamelCase )
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, ):
_SCREAMING_SNAKE_CASE : Dict = os.path.join(__lowerCamelCase, __lowerCamelCase )
_SCREAMING_SNAKE_CASE : int = BarkSemanticConfig.from_pretrained(os.path.join(__lowerCamelCase, "config.json" ) )
_SCREAMING_SNAKE_CASE : Dict = BarkCoarseConfig.from_pretrained(os.path.join(__lowerCamelCase, "config.json" ) )
_SCREAMING_SNAKE_CASE : Union[str, Any] = BarkFineConfig.from_pretrained(os.path.join(__lowerCamelCase, "config.json" ) )
_SCREAMING_SNAKE_CASE : Dict = EncodecConfig.from_pretrained("facebook/encodec_24khz" )
_SCREAMING_SNAKE_CASE : int = BarkSemanticModel.from_pretrained(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Tuple = BarkCoarseModel.from_pretrained(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = BarkFineModel.from_pretrained(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = EncodecModel.from_pretrained("facebook/encodec_24khz" )
_SCREAMING_SNAKE_CASE : Any = BarkConfig.from_sub_model_configs(
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[Any] = BarkGenerationConfig.from_sub_model_configs(
semantic.generation_config, coarseAcoustic.generation_config, fineAcoustic.generation_config )
_SCREAMING_SNAKE_CASE : str = BarkModel(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Tuple = semantic
_SCREAMING_SNAKE_CASE : Tuple = coarseAcoustic
_SCREAMING_SNAKE_CASE : List[str] = fineAcoustic
_SCREAMING_SNAKE_CASE : Tuple = codec
_SCREAMING_SNAKE_CASE : Tuple = bark_generation_config
Path(__lowerCamelCase ).mkdir(exist_ok=__lowerCamelCase )
bark.save_pretrained(__lowerCamelCase, repo_id=__lowerCamelCase, push_to_hub=__lowerCamelCase )
if __name__ == "__main__":
UpperCamelCase__ =argparse.ArgumentParser()
# Required parameters
parser.add_argument('model_type', type=str, help='text, coarse or fine.')
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--is_small', action='store_true', help='convert the small version instead of the large.')
UpperCamelCase__ =parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
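# Hedged, generic restatement of the key-fixup loop above: strip a compilation prefix
# and apply substring renames when porting a checkpoint. The mapping here is a toy,
# not the full Bark rename table.
def remap_state_dict(state_dict, rename_map, prefix="_orig_mod."):
    out = {}
    for k, v in state_dict.items():
        if k.startswith(prefix):
            k = k[len(prefix):]
        for old, new in rename_map.items():
            k = k.replace(old, new)
        out[k] = v
    return out

sd = {"_orig_mod.transformer.h.0.ln_1.weight": 0}
print(remap_state_dict(sd, {"transformer.": "", "h.": "layers.", "ln_1": "layernorm_1"}))
# -> {'layers.0.layernorm_1.weight': 0}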
| 381
| 1
|
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--original_config_file',
type=str,
required=True,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--image_size',
default=512,
type=int,
help=(
'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
def parse_bool( string ):
"""simple docstring"""
if string == "True":
return True
elif string == "False":
return False
else:
raise ValueError(f'could not parse string as bool {string}' )
parser.add_argument(
'--use_linear_projection', help='Override for use linear projection', required=False, type=parse_bool
)
parser.add_argument('--cross_attention_dim', help='Override for cross attention_dim', required=False, type=int)
lowerCAmelCase = parser.parse_args()
lowerCAmelCase = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 43
|
'''simple docstring'''
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
WEIGHTS_TO_AVERAGE_ENDSWITH = [
"word_embeddings_layernorm.weight",
"word_embeddings_layernorm.bias",
"input_layernorm.weight",
"input_layernorm.bias",
"post_attention_layernorm.weight",
"post_attention_layernorm.bias",
"self_attention.dense.bias",
"mlp.dense_4h_to_h.bias",
"ln_f.weight",
"ln_f.bias",
]
WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = [
"mlp.dense_4h_to_h.weight",
"self_attention.dense.weight",
]
def layer_name_mapping(key, file):
    """Map a Megatron-DeepSpeed weight name onto its transformers equivalent."""
    # Handle first and last layers
    layer_rename_map = {
        "word_embeddings.weight": "word_embeddings.weight",
        "word_embeddings.norm.weight": "word_embeddings_layernorm.weight",
        "word_embeddings.norm.bias": "word_embeddings_layernorm.bias",
        "weight": "ln_f.weight",
        "bias": "ln_f.bias",
    }

    if key in layer_rename_map:
        return layer_rename_map[key]

    # Handle transformer blocks: the layer number is encoded in the file name
    layer_number = int(re.match(r".*layer_(\d*).*", file)[1])
    layer_number -= 3
    return f"h.{layer_number}." + key
def get_dtype_size(dtype):
    """Return the size in bytes of one element of the given torch dtype."""
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(r"[^\d](\d+)$", str(dtype))
    if bit_search is None:
        raise ValueError(f"`dtype` is not a valid dtype: {dtype}.")
    bit_size = int(bit_search.groups()[0])
    return bit_size // 8
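def _demo_merge_tp_shards():
    # Minimal sketch (toy tensors, not real BLOOM shards) of the merge rule the
    # conversion below applies: layer-norm-like weights whose names appear in
    # WEIGHTS_TO_AVERAGE_ENDSWITH are averaged over tensor-parallel ranks, while
    # parallel linear weights are concatenated along their sharded dimension.
    rank_a = {"input_layernorm.weight": torch.ones(4), "self_attention.query_key_value.weight": torch.ones(4, 2)}
    rank_b = {"input_layernorm.weight": torch.ones(4), "self_attention.query_key_value.weight": torch.ones(4, 2)}
    merged = {}
    for key in rank_a:
        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
            merged[key] = (rank_a[key] + rank_b[key]) / 2  # average across the 2 toy ranks
        else:
            cat_dim = 1 if any(t in key for t in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
            merged[key] = torch.cat([rank_a[key], rank_b[key]], dim=cat_dim)  # concatenate shards
    return merged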
def convert_bloom_checkpoint_to_pytorch(
    bloom_checkpoint_path, bloom_config_file, pytorch_dump_folder_path, shard_model, pretraining_tp
):
    # Construct model
    if bloom_config_file == "":
        config = BloomConfig()
    else:
        config = BloomConfig.from_json_file(bloom_config_file)

    if shard_model:
        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        index_dict = {"weight_map": {}, "metadata": {}}
        total_size = 0

        missing_keys = None

        config = BloomConfig()

        for j, file in enumerate(file_names):
            print("Processing file: {}".format(file))
            tensors = None

            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights across TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp
            torch.save(
                tensors,
                os.path.join(
                    pytorch_dump_folder_path,
                    "pytorch_model_{}-of-{}.bin".format(str(j + 1).zfill(5), str(len(file_names)).zfill(5)),
                ),
            )

            for key in tensors.keys():
                value = tensors[key]
                total_size += value.numel() * get_dtype_size(value.dtype)
                if key not in index_dict["weight_map"]:
                    index_dict["weight_map"][key] = "pytorch_model_{}-of-{}.bin".format(
                        str(j + 1).zfill(5), str(len(file_names)).zfill(5)
                    )

        config = BloomConfig()
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        index_dict["metadata"]["total_size"] = total_size
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
        with open(os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME + ".index.json"), "w", encoding="utf-8") as f:
            json_config = json.dumps(index_dict, indent=2, sort_keys=True) + "\n"
            f.write(json_config)
    else:
        model = BloomModel(config)

        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        missing_keys = None
        for i, file in enumerate(file_names):
            tensors = None
            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights across TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp

            other_keys = model.load_state_dict(tensors, strict=False)
            assert not other_keys.unexpected_keys, f"The keys {other_keys.unexpected_keys} are unexpected"
            if missing_keys is None:
                missing_keys = set(other_keys.missing_keys)
            else:
                missing_keys = missing_keys.intersection(set(other_keys.missing_keys))
        assert not missing_keys, f"The keys {missing_keys} are missing"

        # Save pytorch-model
        os.makedirs(pytorch_dump_folder_path, exist_ok=True)
        pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        print(f"Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}")
        if config.torch_dtype is not None:
            model = model.to(config.torch_dtype)
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {pytorch_config_dump_path}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--bloom_checkpoint_path",
default=None,
type=str,
required=True,
help="Path to the Megatron-LM checkpoint path.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--bloom_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--shard_model",
action="store_true",
help="An optional setting to shard the output model \nThis enables sharding the converted checkpoint",
)
parser.add_argument(
"--pretraining_tp",
default=4,
type=int,
help="Pretraining TP rank that has been used when training the model in Megatron-LM \n",
)
args = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
| 316
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
SCREAMING_SNAKE_CASE__ = {
"configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"],
"processing_layoutlmv2": ["LayoutLMv2Processor"],
"tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["LayoutLMv2TokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["LayoutLMv2FeatureExtractor"]
SCREAMING_SNAKE_CASE__ = ["LayoutLMv2ImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST",
"LayoutLMv2ForQuestionAnswering",
"LayoutLMv2ForSequenceClassification",
"LayoutLMv2ForTokenClassification",
"LayoutLMv2Layer",
"LayoutLMv2Model",
"LayoutLMv2PreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor, LayoutLMv2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
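# Illustrative note (not part of the original module): because of _LazyModule,
# a statement like `from transformers.models.layoutlmv2 import LayoutLMv2Config`
# only imports the configuration submodule at attribute-access time, which
# keeps a bare `import transformers` cheap.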
| 140
|
from math import factorial, pi
def maclaurin_sin(theta, accuracy=30):
    """Return sin(theta) from its Maclaurin series, truncated to `accuracy` terms."""
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_sin() requires either an int or float for theta")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_sin() requires a positive int for accuracy")
    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi  # reduce theta into [0, 2*pi)
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy)
    )


def maclaurin_cos(theta, accuracy=30):
    """Return cos(theta) from its Maclaurin series, truncated to `accuracy` terms."""
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_cos() requires either an int or float for theta")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_cos() requires a positive int for accuracy")
    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi  # reduce theta into [0, 2*pi)
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))
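def _demo_maclaurin_accuracy():
    # Sanity sketch (not part of the original file): the truncated series
    #     sin(theta) = sum_r (-1)**r * theta**(2r + 1) / (2r + 1)!
    #     cos(theta) = sum_r (-1)**r * theta**(2r)     / (2r)!
    # should match math.sin / math.cos closely at the default accuracy.
    from math import cos, sin

    assert abs(maclaurin_sin(1.0) - sin(1.0)) < 1e-9
    assert abs(maclaurin_cos(1.0) - cos(1.0)) < 1e-9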
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(10))
print(maclaurin_sin(-10))
print(maclaurin_sin(10, 15))
print(maclaurin_sin(-10, 15))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(10, 15))
print(maclaurin_cos(-10, 15))
| 140
| 1
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
'tokenizer_file': {
'bigscience/tokenizer': 'https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json',
'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json',
'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json',
'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json',
'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json',
'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json',
'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json',
},
}
class BloomTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = None

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        add_prefix_space=False,
        clean_up_tokenization_spaces=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            add_prefix_space=add_prefix_space,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop('type'))
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words', False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                ' pretokenized inputs.')
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words', False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                ' pretokenized inputs.')
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
| 31
|
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self : Any , lowercase_ : str , lowercase_ : Dict=13 , lowercase_ : Dict=32 , lowercase_ : Any=3 , lowercase_ : Any=4 , lowercase_ : int=[10, 20, 30, 40] , lowercase_ : Union[str, Any]=[2, 2, 3, 2] , lowercase_ : Optional[int]=True , lowercase_ : Union[str, Any]=True , lowercase_ : Dict=37 , lowercase_ : Optional[Any]="gelu" , lowercase_ : Union[str, Any]=10 , lowercase_ : Optional[int]=0.02 , lowercase_ : Tuple=["stage2", "stage3", "stage4"] , lowercase_ : Optional[Any]=[2, 3, 4] , lowercase_ : Optional[int]=None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = parent
SCREAMING_SNAKE_CASE_ : Any = batch_size
SCREAMING_SNAKE_CASE_ : List[Any] = image_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = num_channels
SCREAMING_SNAKE_CASE_ : List[Any] = num_stages
SCREAMING_SNAKE_CASE_ : List[str] = hidden_sizes
SCREAMING_SNAKE_CASE_ : Optional[int] = depths
SCREAMING_SNAKE_CASE_ : str = is_training
SCREAMING_SNAKE_CASE_ : int = use_labels
SCREAMING_SNAKE_CASE_ : Dict = intermediate_size
SCREAMING_SNAKE_CASE_ : Optional[int] = hidden_act
SCREAMING_SNAKE_CASE_ : Optional[Any] = num_labels
SCREAMING_SNAKE_CASE_ : List[Any] = initializer_range
SCREAMING_SNAKE_CASE_ : Dict = out_features
SCREAMING_SNAKE_CASE_ : List[str] = out_indices
SCREAMING_SNAKE_CASE_ : int = scope
def _SCREAMING_SNAKE_CASE ( self : Tuple):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
SCREAMING_SNAKE_CASE_ : Any = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels)
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.get_config()
return config, pixel_values, labels
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
'''simple docstring'''
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=lowercase_ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def _SCREAMING_SNAKE_CASE ( self : str , lowercase_ : Union[str, Any] , lowercase_ : Optional[Any] , lowercase_ : Union[str, Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = ConvNextModel(config=lowercase_)
model.to(lowercase_)
model.eval()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = model(lowercase_)
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowercase_ : str , lowercase_ : Dict , lowercase_ : str):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = ConvNextForImageClassification(lowercase_)
model.to(lowercase_)
model.eval()
SCREAMING_SNAKE_CASE_ : List[Any] = model(lowercase_ , labels=lowercase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : Union[str, Any] , lowercase_ : Any , lowercase_ : Dict):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = ConvNextBackbone(config=lowercase_)
model.to(lowercase_)
model.eval()
SCREAMING_SNAKE_CASE_ : Any = model(lowercase_)
# verify hidden states
self.parent.assertEqual(len(result.feature_maps) , len(config.out_features))
self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.hidden_sizes[1], 4, 4])
# verify channels
self.parent.assertEqual(len(model.channels) , len(config.out_features))
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:])
# verify backbone works with out_features=None
SCREAMING_SNAKE_CASE_ : Optional[Any] = None
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ConvNextBackbone(config=lowercase_)
model.to(lowercase_)
model.eval()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = model(lowercase_)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps) , 1)
self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.hidden_sizes[-1], 1, 1])
# verify channels
self.parent.assertEqual(len(model.channels) , 1)
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]])
def _SCREAMING_SNAKE_CASE ( self : int):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[Any] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = config_and_inputs
SCREAMING_SNAKE_CASE_ : Dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase = (
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
__UpperCamelCase = (
{"feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification}
if is_torch_available()
else {}
)
__UpperCamelCase = True
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
def _SCREAMING_SNAKE_CASE ( self : Any):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[str] = ConvNextModelTester(self)
SCREAMING_SNAKE_CASE_ : int = ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ , hidden_size=37)
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _SCREAMING_SNAKE_CASE ( self : List[str]):
'''simple docstring'''
return
@unittest.skip(reason='''ConvNext does not use inputs_embeds''')
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
'''simple docstring'''
pass
@unittest.skip(reason='''ConvNext does not support input and output embeddings''')
def _SCREAMING_SNAKE_CASE ( self : Any):
'''simple docstring'''
pass
@unittest.skip(reason='''ConvNext does not use feedforward chunking''')
def _SCREAMING_SNAKE_CASE ( self : int):
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : Dict = model_class(lowercase_)
SCREAMING_SNAKE_CASE_ : int = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE_ : List[str] = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE_ : int = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_)
def _SCREAMING_SNAKE_CASE ( self : str):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
'''simple docstring'''
def check_hidden_states_output(lowercase_ : List[Any] , lowercase_ : List[Any] , lowercase_ : str):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = model_class(lowercase_)
model.to(lowercase_)
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : List[Any] = model(**self._prepare_for_class(lowercase_ , lowercase_))
SCREAMING_SNAKE_CASE_ : Tuple = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.num_stages
self.assertEqual(len(lowercase_) , expected_num_stages + 1)
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : Tuple = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE_ : str = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase_)
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
'''simple docstring'''
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_ : int = ConvNextModel.from_pretrained(lowercase_)
self.assertIsNotNone(lowercase_)
def _A () -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _SCREAMING_SNAKE_CASE ( self : List[str]):
'''simple docstring'''
return AutoImageProcessor.from_pretrained('''facebook/convnext-tiny-224''') if is_vision_available() else None
@slow
def _SCREAMING_SNAKE_CASE ( self : int):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Any = ConvNextForImageClassification.from_pretrained('''facebook/convnext-tiny-224''').to(lowercase_)
SCREAMING_SNAKE_CASE_ : str = self.default_image_processor
SCREAMING_SNAKE_CASE_ : Union[str, Any] = prepare_img()
SCREAMING_SNAKE_CASE_ : List[Any] = image_processor(images=lowercase_ , return_tensors='''pt''').to(lowercase_)
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : str = model(**lowercase_)
# verify the logits
SCREAMING_SNAKE_CASE_ : List[Any] = torch.Size((1, 1000))
self.assertEqual(outputs.logits.shape , lowercase_)
SCREAMING_SNAKE_CASE_ : int = torch.tensor([-0.02_60, -0.47_39, 0.19_11]).to(lowercase_)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase_ , atol=1e-4))
@require_torch
class lowerCAmelCase__ ( unittest.TestCase , UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = (ConvNextBackbone,) if is_torch_available() else ()
__UpperCamelCase = ConvNextConfig
__UpperCamelCase = False
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : str = ConvNextModelTester(self)
| 512
| 0
|
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class MetricTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ['scripts', 'external_deps', 'test_metrics.py'])

        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics

    @require_cpu
    def test_metric_cpu_noop(self):
        debug_launcher(self.test_metrics.main, num_processes=1)

    @require_cpu
    def test_metric_cpu_multi(self):
        debug_launcher(self.test_metrics.main)

    @require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()

    @require_multi_gpu
    def test_metric_gpu_multi(self):
        print(F"""Found {torch.cuda.device_count()} devices.""")
        cmd = ['torchrun', F"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
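# Illustrative note (not from the original file): debug_launcher runs
# self.test_metrics.main in spawned CPU worker processes, so the metric script
# is exercised under distributed semantics even on machines without GPUs.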
| 705
|
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
UpperCAmelCase : Dict = False
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def lowercase_ ( self :Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self :Dict ) -> List[str]:
"""simple docstring"""
lowerCamelCase__ : Any = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' ,torch_dtype=torch.floataa )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
lowerCamelCase__ : List[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
lowerCamelCase__ : Union[str, Any] = torch.manual_seed(0 )
lowerCamelCase__ : Optional[int] = pipe.dual_guided(
prompt='''first prompt''' ,image=__UpperCAmelCase ,text_to_image_strength=0.75 ,generator=__UpperCAmelCase ,guidance_scale=7.5 ,num_inference_steps=2 ,output_type='''numpy''' ,).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(__UpperCAmelCase )
lowerCamelCase__ : Dict = VersatileDiffusionPipeline.from_pretrained(__UpperCAmelCase ,torch_dtype=torch.floataa )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
lowerCamelCase__ : Dict = generator.manual_seed(0 )
lowerCamelCase__ : List[str] = pipe.dual_guided(
prompt='''first prompt''' ,image=__UpperCAmelCase ,text_to_image_strength=0.75 ,generator=__UpperCAmelCase ,guidance_scale=7.5 ,num_inference_steps=2 ,output_type='''numpy''' ,).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
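    # Note (illustrative): both dual_guided calls above use a generator seeded
    # with 0, so any difference between `image` and `new_image` must come from
    # the save/reload round-trip rather than sampling noise.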
def lowercase_ ( self :Tuple ) -> str:
"""simple docstring"""
lowerCamelCase__ : Optional[Any] = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' ,torch_dtype=torch.floataa )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
lowerCamelCase__ : Tuple = '''cyberpunk 2077'''
lowerCamelCase__ : str = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
lowerCamelCase__ : str = torch.manual_seed(0 )
lowerCamelCase__ : Union[str, Any] = pipe.dual_guided(
prompt=__UpperCAmelCase ,image=__UpperCAmelCase ,text_to_image_strength=0.75 ,generator=__UpperCAmelCase ,guidance_scale=7.5 ,num_inference_steps=50 ,output_type='''numpy''' ,).images
lowerCamelCase__ : Tuple = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
lowerCamelCase__ : Optional[Any] = np.array([0.1_448, 0.1_619, 0.1_741, 0.1_086, 0.1_147, 0.1_128, 0.1_199, 0.1_165, 0.1_001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
lowerCamelCase__ : Tuple = '''A painting of a squirrel eating a burger '''
lowerCamelCase__ : Optional[Any] = torch.manual_seed(0 )
lowerCamelCase__ : List[Any] = pipe.text_to_image(
prompt=__UpperCAmelCase ,generator=__UpperCAmelCase ,guidance_scale=7.5 ,num_inference_steps=50 ,output_type='''numpy''' ).images
lowerCamelCase__ : List[Any] = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
lowerCamelCase__ : Optional[Any] = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
lowerCamelCase__ : Any = pipe.image_variation(__UpperCAmelCase ,generator=__UpperCAmelCase ,output_type='''numpy''' ).images
lowerCamelCase__ : Union[str, Any] = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
lowerCamelCase__ : str = np.array([0.3_076, 0.3_123, 0.3_284, 0.3_782, 0.3_770, 0.3_894, 0.4_297, 0.4_331, 0.4_456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
| 121
| 0
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class DPMSolverSDEScheduler(metaclass=DummyObject):
    _backends = ["torch", "torchsde"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "torchsde"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])
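# Usage sketch (illustrative): importing the dummy class always succeeds, but
# touching it without the listed backends raises a helpful error.
#     from diffusers.utils.dummy_torch_and_torchsde_objects import DPMSolverSDEScheduler
#     DPMSolverSDEScheduler()  # raises unless `torch` and `torchsde` are installed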
| 337
|
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings(idx):
    """Collect (HF key, original key) pairs for the patch embedding of stage `idx`."""
    embed = []
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight""",
f"""stage{idx}.patch_embed.proj.weight""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias""",
f"""stage{idx}.patch_embed.proj.bias""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight""",
f"""stage{idx}.patch_embed.norm.weight""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias""",
f"""stage{idx}.patch_embed.norm.bias""",
) )
return embed
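# Illustrative reading of the pairs above (hypothetical idx=0): each tuple is
# (HF model key, original checkpoint key), e.g. embeddings(0)[0] ==
# ("cvt.encoder.stages.0.embedding.convolution_embeddings.projection.weight",
#  "stage0.patch_embed.proj.weight"); the conversion later copies the weight
# stored under the second name into the first.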
def attention(idx, cnt):
    """Collect (HF key, original key) pairs for attention block `cnt` of stage `idx`."""
    attention_weights = []
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_q.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_q.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_k.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_k.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_v.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_v.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj.bias""",
) )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight""", f"""stage{idx}.blocks.{cnt}.mlp.fc1.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias""", f"""stage{idx}.blocks.{cnt}.mlp.fc1.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight""", f"""stage{idx}.blocks.{cnt}.mlp.fc2.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias""", f"""stage{idx}.blocks.{cnt}.mlp.fc2.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight""", f"""stage{idx}.blocks.{cnt}.norm1.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias""", f"""stage{idx}.blocks.{cnt}.norm1.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight""", f"""stage{idx}.blocks.{cnt}.norm2.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias""", f"""stage{idx}.blocks.{cnt}.norm2.bias""") )
return attention_weights
def cls_token(idx):
    """Collect the (HF key, original key) pair for the cls token of stage `idx`."""
    token = []
token.append((f"""cvt.encoder.stages.{idx}.cls_token""", "stage2.cls_token") )
return token
def final():
    """Collect (HF key, original key) pairs for the final norm and classifier head."""
    head = []
head.append(("layernorm.weight", "norm.weight") )
head.append(("layernorm.bias", "norm.bias") )
head.append(("classifier.weight", "head.weight") )
head.append(("classifier.bias", "head.bias") )
return head
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder_path):
    """Convert a Microsoft CvT checkpoint into the Hugging Face format."""
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1_000

    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)

    # For depth size 13 (13 = 1 + 2 + 10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1 + 4 + 16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1_024]

    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))

    huggingface_weights = OrderedDict()
    list_of_state_dict = []

    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)

    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]

    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"""--cvt_model""",
default="""cvt-w24""",
type=str,
help="""Name of the cvt model you'd like to convert.""",
)
parser.add_argument(
"""--image_size""",
default=384,
type=int,
help="""Input Image Size""",
)
parser.add_argument(
"""--cvt_file_name""",
default=r"""cvtmodels\CvT-w24-384x384-IN-22k.pth""",
type=str,
help="""Input Image Size""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
args = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 337
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase : Any = logging.get_logger(__name__)
lowerCamelCase : Union[str, Any] = {
"""xlm-roberta-base""": """https://huggingface.co/xlm-roberta-base/resolve/main/config.json""",
"""xlm-roberta-large""": """https://huggingface.co/xlm-roberta-large/resolve/main/config.json""",
"""xlm-roberta-large-finetuned-conll02-dutch""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"""
),
"""xlm-roberta-large-finetuned-conll02-spanish""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"""
),
"""xlm-roberta-large-finetuned-conll03-english""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"""
),
"""xlm-roberta-large-finetuned-conll03-german""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"""
),
}
class XLMRobertaConfig(PretrainedConfig):
    model_type = "xlm-roberta"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self):
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
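# Export sketch (illustrative; constructor details assumed, not taken from this
# file): the dynamic axes above keep batch and sequence symbolic, so a single
# exported ONNX graph serves any batch size and sequence length.
#     onnx_config = XLMRobertaOnnxConfig(XLMRobertaConfig(), task="default")
#     assert onnx_config.inputs["input_ids"] == {0: "batch", 1: "sequence"}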
| 718
|
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp):
    """Checks whether `cp` is the codepoint of a CJK character."""
    # This defines a "chinese character" as anything in the CJK Unicode block:
    #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)  # CJK Unified Ideographs
        or (cp >= 0x3400 and cp <= 0x4DBF)  # Extension A
        or (cp >= 0x20000 and cp <= 0x2A6DF)  # Extension B
        or (cp >= 0x2A700 and cp <= 0x2B73F)  # Extension C
        or (cp >= 0x2B740 and cp <= 0x2B81F)  # Extension D
        or (cp >= 0x2B820 and cp <= 0x2CEAF)  # Extension E
        or (cp >= 0xF900 and cp <= 0xFAFF)  # CJK Compatibility Ideographs
        or (cp >= 0x2F800 and cp <= 0x2FA1F)  # Compatibility Ideographs Supplement
    ):
        return True
    return False


def is_chinese(word):
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1


def get_chinese_word(tokens):
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list


def add_sub_symbol(bert_tokens, chinese_word_set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            l = min(end - start, max_word_len)
            for i in range(l, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word


def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)
    assert len(ref_ids) == len(bert_res)
    return ref_ids


def main(args):
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
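def _demo_add_sub_symbol():
    # Illustrative sketch (example strings invented for this demo): characters
    # that continue a segmented Chinese word receive the "##" sub-word marker,
    # which is what whole-word masking keys on.
    assert add_sub_symbol(["你", "好", "世", "界"], {"你好"}) == ["你", "##好", "世", "界"]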
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="""prepare_chinese_ref""")
parser.add_argument(
"""--file_name""",
required=False,
type=str,
default="""./resources/chinese-demo.txt""",
help="""file need process, same as training data in lm""",
)
parser.add_argument(
"""--ltp""",
required=False,
type=str,
default="""./resources/ltp""",
help="""resources for LTP tokenizer, usually a path""",
)
parser.add_argument(
"""--bert""",
required=False,
type=str,
default="""./resources/robert""",
help="""resources for Bert tokenizer""",
)
parser.add_argument(
"""--save_path""",
required=False,
type=str,
default="""./resources/ref.txt""",
help="""path to save res""",
)
args = parser.parse_args()
main(args)
| 168
| 0
|
"""simple docstring"""
class lowercase_ :
'''simple docstring'''
def __init__( self : List[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int , _UpperCAmelCase : int ):
_A = None
_A = None
_A = graph
self._normalize_graph(_UpperCAmelCase , _UpperCAmelCase )
_A = len(_UpperCAmelCase )
_A = None
def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict ):
if sources is int:
_A = [sources]
if sinks is int:
_A = [sinks]
if len(_UpperCAmelCase ) == 0 or len(_UpperCAmelCase ) == 0:
return
_A = sources[0]
_A = sinks[0]
# make fake vertex if there are more
# than one source or sink
if len(_UpperCAmelCase ) > 1 or len(_UpperCAmelCase ) > 1:
_A = 0
for i in sources:
max_input_flow += sum(self.graph[i] )
_A = len(self.graph ) + 1
for room in self.graph:
room.insert(0 , 0 )
self.graph.insert(0 , [0] * size )
for i in sources:
_A = max_input_flow
_A = 0
_A = len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
_A = max_input_flow
_A = size - 1
def lowerCAmelCase_ ( self : Optional[Any] ):
if self.maximum_flow_algorithm is None:
raise Exception('You need to set maximum flow algorithm before.' )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def lowerCAmelCase_ ( self : List[str] , _UpperCAmelCase : Union[str, Any] ):
_A = algorithm(self )
class lowercase_ :
'''simple docstring'''
def __init__( self : List[Any] , _UpperCAmelCase : Union[str, Any] ):
_A = flow_network
_A = flow_network.verticesCount
_A = flow_network.sourceIndex
_A = flow_network.sinkIndex
# it's just a reference, so you shouldn't change
# it in your algorithms, use deep copy before doing that
_A = flow_network.graph
_A = False
def lowerCAmelCase_ ( self : Optional[Any] ):
if not self.executed:
self._algorithm()
_A = True
def lowerCAmelCase_ ( self : int ):
pass
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
def __init__( self : int , _UpperCAmelCase : Any ):
super().__init__(_UpperCAmelCase )
# use this to save your result
_A = -1
def lowerCAmelCase_ ( self : Optional[Any] ):
if not self.executed:
raise Exception('You should execute algorithm before using its result!' )
return self.maximum_flow
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
def __init__( self : Dict , _UpperCAmelCase : List[Any] ):
super().__init__(_UpperCAmelCase )
_A = [[0] * self.verticies_count for i in range(self.verticies_count )]
_A = [0] * self.verticies_count
_A = [0] * self.verticies_count
def lowerCAmelCase_ ( self : Dict ):
_A = self.verticies_count
# push some substance to graph
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
_A = [
i
for i in range(self.verticies_count )
if i != self.source_index and i != self.sink_index
]
# move through list
_A = 0
while i < len(_UpperCAmelCase ):
_A = vertices_list[i]
_A = self.heights[vertex_index]
self.process_vertex(_UpperCAmelCase )
if self.heights[vertex_index] > previous_height:
# if it was relabeled, swap elements
# and start from 0 index
vertices_list.insert(0 , vertices_list.pop(_UpperCAmelCase ) )
_A = 0
else:
i += 1
_A = sum(self.preflow[self.source_index] )
def lowerCAmelCase_ ( self : int , _UpperCAmelCase : Any ):
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
# if it's neighbour and current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(_UpperCAmelCase , _UpperCAmelCase )
self.relabel(_UpperCAmelCase )
def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : Tuple , _UpperCAmelCase : Tuple ):
_A = min(
self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
def lowerCAmelCase_ ( self : Union[str, Any] , _UpperCAmelCase : int ):
_A = None
for to_index in range(self.verticies_count ):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
_A = self.heights[to_index]
if min_height is not None:
_A = min_height + 1
if __name__ == "__main__":
a = [0]
a = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
a = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
a = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
a = flow_network.find_maximum_flow()
print(F'''maximum flow is {maximum_flow}''')
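# Hand-checked expectation (illustrative): the only source-to-sink route in the
# 4-node graph above is 0 -> 1 -> 2 -> 3 with capacities 7, 6 and 8, so the
# bottleneck edge 1 -> 2 limits the answer and this should print
# "maximum flow is 6".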
| 7
|
'''simple docstring'''
def is_palindrome(head):
    """Check whether a singly linked list is a palindrome in O(1) extra space."""
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True
def is_palindrome_stack(head):
    """Check for a palindrome by pushing the second half onto a stack."""
    if not head or not head.next:
        return True

    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next

    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)

    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next

    return True
def is_palindrome_dict(head):
    """Check for a palindrome by recording the positions of each value."""
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
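# The three helpers above assume a singly linked node type with `val` and
# `next` attributes that is not defined in this snippet. A minimal sketch of
# such a node and a round-trip check (all names here are illustrative
# assumptions, not part of the original file):
class ListNode:
    def __init__(self, val):
        self.val = val
        self.next = None


def build_list(values):
    # build a singly linked list from a Python iterable and return its head
    head = tail = None
    for v in values:
        node = ListNode(v)
        if head is None:
            head = tail = node
        else:
            tail.next = node
            tail = node
    return head


if __name__ == "__main__":
    assert is_palindrome(build_list([1, 2, 2, 1]))
    assert is_palindrome_stack(build_list([7]))
    assert not is_palindrome_dict(build_list([1, 2, 3]))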
| 38
| 0
|
from __future__ import annotations
def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    """
    Find the max value of ``nums`` in the index range [left, right]
    using divide and conquer.

    >>> nums = [1, 3, 5, 7, 9, 2, 4, 6, 8, 10]
    >>> find_max(nums, 0, len(nums) - 1) == max(nums)
    True
    >>> find_max([], 0, 0)
    Traceback (most recent call last):
        ...
    ValueError: find_max() arg is an empty sequence
    """
    if len(nums) == 0:
        raise ValueError("find_max() arg is an empty sequence")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("list index out of range")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 247
|
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8

KEYMAP = {
    "tab": ord("\t"),
    "newline": ord("\r"),
    "esc": 27,
    "up": 65 + ARROW_KEY_FLAG,
    "down": 66 + ARROW_KEY_FLAG,
    "right": 67 + ARROW_KEY_FLAG,
    "left": 68 + ARROW_KEY_FLAG,
    "mod_int": 91,
    "undefined": sys.maxsize,
    "interrupt": 3,
    "insert": 50,
    "delete": 51,
    "pg_up": 53,
    "pg_down": 54,
}

KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]

if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
        b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
        b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
    }

for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    """Get raw characters from stdin, one keystroke at a time."""
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                cha = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[cha])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = cha[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch
def get_character():
    """Get a character from the keyboard and return the key code."""
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
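# Illustrative driver (not part of the original module): echo key names until
# the user presses Ctrl-C. `reverse_map` is a helper defined here, not in the
# library; note `get_character` may return the integer KEYMAP["undefined"].
if __name__ == "__main__":
    reverse_map = {v: k for k, v in KEYMAP.items()}
    while True:
        key = get_character()
        if not isinstance(key, str) or ord(key) == KEYMAP["interrupt"]:
            break
        print(reverse_map.get(ord(key), key))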
| 247
| 1
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_audio_spectrogram_transformer_config(model_name):
    config = ASTConfig()

    if "10-10" in model_name:
        pass
    elif "speech-commands" in model_name:
        config.max_length = 128
    elif "12-12" in model_name:
        config.time_stride = 12
        config.frequency_stride = 12
    elif "14-14" in model_name:
        config.time_stride = 14
        config.frequency_stride = 14
    elif "16-16" in model_name:
        config.time_stride = 16
        config.frequency_stride = 16
    else:
        raise ValueError("Model not supported")

    repo_id = "huggingface/label-files"
    if "speech-commands" in model_name:
        config.num_labels = 35
        filename = "speech-commands-v2-id2label.json"
    else:
        config.num_labels = 527
        filename = "audioset-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(name):
    if "module.v" in name:
        name = name.replace("module.v", "audio_spectrogram_transformer")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "dist_token" in name:
        name = name.replace("dist_token", "embeddings.distillation_token")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    # transformer blocks
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    # final layernorm
    if "audio_spectrogram_transformer.norm" in name:
        name = name.replace("audio_spectrogram_transformer.norm", "audio_spectrogram_transformer.layernorm")
    # classifier head
    if "module.mlp_head.0" in name:
        name = name.replace("module.mlp_head.0", "classifier.layernorm")
    if "module.mlp_head.1" in name:
        name = name.replace("module.mlp_head.1", "classifier.dense")
    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # split the fused qkv projection into separate query/key/value
            # tensors; the target key names below follow the standard
            # transformers attention layout and are reconstructed, not verbatim
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.hidden_size
            if "weight" in key:
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def remove_keys(state_dict):
    ignore_keys = [
        "module.v.head.weight",
        "module.v.head.bias",
        "module.v.head_dist.weight",
        "module.v.head_dist.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
@torch.no_grad()
def convert_audio_spectrogram_transformer_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak the original AST weights into the 🤗 Audio Spectrogram Transformer structure.
    """
    config = get_audio_spectrogram_transformer_config(model_name)

    model_name_to_url = {
        "ast-finetuned-audioset-10-10-0.4593": (
            "https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.450": (
            "https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448": (
            "https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448-v2": (
            "https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"
        ),
        "ast-finetuned-audioset-12-12-0.447": (
            "https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"
        ),
        "ast-finetuned-audioset-14-14-0.443": (
            "https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"
        ),
        "ast-finetuned-audioset-16-16-0.442": (
            "https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"
        ),
        "ast-finetuned-speech-commands-v2": (
            "https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"
        ),
    }

    # load original state_dict
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove some keys
    remove_keys(state_dict)
    # rename some keys
    new_state_dict = convert_state_dict(state_dict, config)

    # load 🤗 model
    model = ASTForAudioClassification(config)
    model.eval()

    model.load_state_dict(new_state_dict)

    # verify outputs on dummy input
    # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    mean = -4.2677393 if "speech-commands" not in model_name else -6.845978
    std = 4.5689974 if "speech-commands" not in model_name else 5.5654526
    max_length = 1024 if "speech-commands" not in model_name else 128
    feature_extractor = ASTFeatureExtractor(mean=mean, std=std, max_length=max_length)

    if "speech-commands" in model_name:
        dataset = load_dataset("speech_commands", "v0.02", split="validation")
        waveform = dataset[0]["audio"]["array"]
    else:
        filepath = hf_hub_download(
            repo_id="nielsr/audio-spectogram-transformer-checkpoint",
            filename="sample_audio.flac",
            repo_type="dataset",
        )

        waveform, _ = torchaudio.load(filepath)
        waveform = waveform.squeeze().numpy()

    inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="pt")

    # forward pass
    outputs = model(**inputs)
    logits = outputs.logits

    if model_name == "ast-finetuned-audioset-10-10-0.4593":
        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602])
    elif model_name == "ast-finetuned-audioset-10-10-0.450":
        expected_slice = torch.tensor([-1.1986, -7.0903, -8.2718])
    elif model_name == "ast-finetuned-audioset-10-10-0.448":
        expected_slice = torch.tensor([-2.6128, -8.0080, -9.4344])
    elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
        expected_slice = torch.tensor([-1.5080, -7.4534, -8.8917])
    elif model_name == "ast-finetuned-audioset-12-12-0.447":
        expected_slice = torch.tensor([-0.5050, -6.5833, -8.0843])
    elif model_name == "ast-finetuned-audioset-14-14-0.443":
        expected_slice = torch.tensor([-0.3826, -7.0336, -8.2413])
    elif model_name == "ast-finetuned-audioset-16-16-0.442":
        expected_slice = torch.tensor([-1.2113, -6.9101, -8.3470])
    elif model_name == "ast-finetuned-speech-commands-v2":
        expected_slice = torch.tensor([6.1589, -8.0566, -8.7984])
    else:
        raise ValueError("Unknown model name")
    if not torch.allclose(logits[0, :3], expected_slice, atol=1e-4):
        raise ValueError("Logits don't match")
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving feature extractor to {pytorch_dump_folder_path}")
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and feature extractor to the hub...")
        model.push_to_hub(f"MIT/{model_name}")
        feature_extractor.push_to_hub(f"MIT/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="ast-finetuned-audioset-10-10-0.4593",
type=str,
help="Name of the Audio Spectrogram Transformer model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
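# Example invocation (illustrative; the script filename and output path are
# placeholders, not verbatim from this file):
#   python convert_audio_spectrogram_transformer_checkpoint.py \
#       --model_name ast-finetuned-audioset-10-10-0.4593 \
#       --pytorch_dump_folder_path ./ast-converted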
| 18
|
'''simple docstring'''
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def get_job_links(workflow_run_id, token=None):
    """Extract job names and their job links in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_links = {}

    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return job_links
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
def get_artifacts_links(workflow_run_id, token=None):
    """Get all artifact links from a workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"
    result = requests.get(url, headers=headers).json()
    artifacts = {}

    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})

        return artifacts
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
def download_artifact(artifact_name, artifact_url, output_dir, token):
    """Download a GitHub Actions artifact from its URL."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers["Location"]
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, f"{artifact_name}.zip")
    with open(file_path, "wb") as fp:
        fp.write(response.content)
def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    """Extract errors from a downloaded artifact (in .zip format)."""
    errors = []
    failed_tests = []
    job_name = None

    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode("UTF-8").strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": ")]
                                    error = line[line.index(": ") + len(": ") :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED "):
                                # `test` is the test method that failed
                                test = line[len("FAILED ") :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line

    if len(errors) != len(failed_tests):
        raise ValueError(
            f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` "
            f"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            " problem."
        )

    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)

    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]

    return result
def get_all_errors(artifact_dir, job_links=None):
    """Extract errors from all artifact files."""
    errors = []
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(".zip")]

    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))

    return errors
def reduce_by_error(logs, error_filter=None):
    """Count the occurrences of each error."""
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def get_model(test):
    """Get the model name from a test method path."""
    test = test.split("::")[0]
    if test.startswith("tests/models/"):
        test = test.split("/")[2]
    else:
        test = None

    return test
def reduce_by_model(logs, error_filter=None):
    """Count the occurrences of each error per model."""
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}

    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def make_github_table(reduced_by_error):
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = f"| {count} | {error[:100]} | |"
        lines.append(line)

    return "\n".join(lines)


def make_github_table_per_model(reduced_by_model):
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items())[0]
        line = f"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)

    return "\n".join(lines)
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Where to store the downloaded artifacts and other result files.",
)
parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
_SCREAMING_SNAKE_CASE = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
_SCREAMING_SNAKE_CASE = get_job_links(args.workflow_run_id, token=args.token)
_SCREAMING_SNAKE_CASE = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
_SCREAMING_SNAKE_CASE = k.find(" / ")
_SCREAMING_SNAKE_CASE = k[index + len(" / ") :]
_SCREAMING_SNAKE_CASE = v
with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
_SCREAMING_SNAKE_CASE = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
_SCREAMING_SNAKE_CASE = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
_SCREAMING_SNAKE_CASE = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
_SCREAMING_SNAKE_CASE = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
_SCREAMING_SNAKE_CASE = reduce_by_error(errors)
_SCREAMING_SNAKE_CASE = reduce_by_model(errors)
_SCREAMING_SNAKE_CASE = make_github_table(reduced_by_error)
_SCREAMING_SNAKE_CASE = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
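# Example invocation (illustrative; the run id and token are placeholders):
#   python get_ci_error_statistics.py --workflow_run_id 1234567890 \
#       --output_dir ./ci_errors --token <github_token_with_actions_read>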
| 18
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_mae"] = [
        "VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMAEForPreTraining",
        "ViTMAELayer",
        "ViTMAEModel",
        "ViTMAEPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
        "TFViTMAEForPreTraining",
        "TFViTMAEModel",
        "TFViTMAEPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
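# The `_LazyModule` registered above defers the heavy torch/TF imports until an
# attribute such as `ViTMAEModel` is first accessed, so importing the package
# stays cheap even when every backend is installed.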
| 295
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
    AutoencoderKL,
    EulerDiscreteScheduler,
    StableDiffusionLatentUpscalePipeline,
    StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def check_same_shape(tensor_list):
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:])


class StableDiffusionLatentUpscalePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionLatentUpscalePipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "height",
        "width",
        "cross_attention_kwargs",
        "negative_prompt_embeds",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    # one more boolean class attribute was set to `True` in the source; its
    # original name is not recoverable from this snippet
    A__ = True
    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 4
        sizes = (16, 16)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    def get_dummy_components(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            act_fn="gelu",
            attention_head_dim=8,
            norm_num_groups=None,
            block_out_channels=[32, 32, 64, 64],
            time_cond_proj_dim=160,
            conv_in_kernel=1,
            conv_out_kernel=1,
            cross_attention_dim=32,
            down_block_types=(
                "KDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
            ),
            in_channels=8,
            mid_block_type=None,
            only_cross_attention=False,
            out_channels=5,
            resnet_time_scale_shift="scale_shift",
            time_embedding_type="fourier",
            timestep_post_act="gelu",
            up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D"),
        )
        vae = AutoencoderKL(
            block_out_channels=[32, 32, 64, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=[
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
            ],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        scheduler = EulerDiscreteScheduler(prediction_type="sample")
        text_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="quick_gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": model.eval(),
            "vae": vae.eval(),
            "scheduler": scheduler,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": self.dummy_image.cpu(),
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 256, 256, 3))
        expected_slice = np.array(
            [0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=7e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=3e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=7e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)

    def test_karras_schedulers_shape(self):
        skip_schedulers = [
            "DDIMScheduler",
            "DDPMScheduler",
            "PNDMScheduler",
            "HeunDiscreteScheduler",
            "EulerAncestralDiscreteScheduler",
            "KDPM2DiscreteScheduler",
            "KDPM2AncestralDiscreteScheduler",
            "DPMSolverSDEScheduler",
        ]
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)

        # make sure that PNDM does not need warm-up
        pipe.scheduler.register_to_config(skip_prk_steps=True)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = 2

        outputs = []
        for scheduler_enum in KarrasDiffusionSchedulers:
            if scheduler_enum.name in skip_schedulers:
                # no sigma schedulers are not supported
                # no schedulers
                continue

            scheduler_cls = getattr(diffusers, scheduler_enum.name)
            pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config)
            output = pipe(**inputs)[0]
            outputs.append(output)

        assert check_same_shape(outputs)
@require_torch_gpu
@slow
class StableDiffusionLatentUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_latent_upscaler_fp16(self):
        generator = torch.manual_seed(33)

        pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
        pipe.to("cuda")

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "a photo of an astronaut high resolution, unreal engine, ultra realistic"

        low_res_latents = pipe(prompt, generator=generator, output_type="latent").images

        image = upscaler(
            prompt=prompt,
            image=low_res_latents,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy"
        )
        assert np.abs((expected_image - image).mean()) < 5e-2

    def test_latent_upscaler_fp16_image(self):
        generator = torch.manual_seed(33)

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"

        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png"
        )

        upscaled_image = upscaler(
            prompt=prompt,
            image=image,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy"
        )
        assert np.abs((expected_image - upscaled_image).max()) < 5e-2
| 295
| 1
|
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

SAMPLE_TEXT = "Hello world! cécé herlolip"

BertAbsConfig = namedtuple(
'''BertAbsConfig''',
[
'''temp_dir''',
'''large''',
'''use_bert_emb''',
'''finetune_bert''',
'''encoder''',
'''share_emb''',
'''max_pos''',
'''enc_layers''',
'''enc_hidden_size''',
'''enc_heads''',
'''enc_ff_size''',
'''enc_dropout''',
'''dec_layers''',
'''dec_hidden_size''',
'''dec_heads''',
'''dec_ff_size''',
'''dec_dropout''',
],
)
def convert_bertabs_checkpoints(path_to_checkpoints, dump_path):
    """Copy/paste and tweak the pre-trained weights provided by the creators
    of BertAbs for the internal architecture.
    """
    # Instantiate the authors' model with the pre-trained weights; the boolean
    # config values below are reconstructed from the upstream script
    config = BertAbsConfig(
        temp_dir=".",
        finetune_bert=False,
        large=False,
        share_emb=True,
        use_bert_emb=False,
        encoder="bert",
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
    )
    checkpoints = torch.load(path_to_checkpoints, lambda storage, loc: storage)
    original = AbsSummarizer(config, torch.device("cpu"), checkpoints)
    original.eval()

    new_model = BertAbsSummarizer(config, torch.device("cpu"))
    new_model.eval()

    # -------------------
    # Convert the weights
    # -------------------

    logging.info("convert the model")
    new_model.bert.load_state_dict(original.bert.state_dict())
    new_model.decoder.load_state_dict(original.decoder.state_dict())
    new_model.generator.load_state_dict(original.generator.state_dict())

    # ----------------------------------
    # Make sure the outputs are identical
    # ----------------------------------

    logging.info("Make sure that the models' outputs are identical")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

    # prepare the model inputs
    encoder_input_ids = tokenizer.encode("This is sample éàalj'-.")
    encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(encoder_input_ids)))
    encoder_input_ids = torch.tensor(encoder_input_ids).unsqueeze(0)
    decoder_input_ids = tokenizer.encode("This is sample 3 éàalj'-.")
    decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(decoder_input_ids)))
    decoder_input_ids = torch.tensor(decoder_input_ids).unsqueeze(0)

    # failsafe to make sure the weights reset does not affect the
    # loaded weights.
    assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight)) == 0

    # forward pass
    src = encoder_input_ids
    tgt = decoder_input_ids
    segs = token_type_ids = None
    clss = None
    mask_src = encoder_attention_mask = None
    mask_tgt = decoder_attention_mask = None
    mask_cls = None

    # The original model does not apply the generator layer immediately but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
    # We make sure that the outputs of the full stack are identical
    output_original_model = original(src, tgt, segs, clss, mask_src, mask_tgt, mask_cls)[0]
    output_original_generator = original.generator(output_original_model)

    output_converted_model = new_model(
        encoder_input_ids, decoder_input_ids, token_type_ids, encoder_attention_mask, decoder_attention_mask
    )[0]
    output_converted_generator = new_model.generator(output_converted_model)

    maximum_absolute_difference = torch.max(torch.abs(output_converted_model - output_original_model)).item()
    print("Maximum absolute difference between weights: {:.2f}".format(maximum_absolute_difference))
    maximum_absolute_difference = torch.max(torch.abs(output_converted_generator - output_original_generator)).item()
    print("Maximum absolute difference between weights: {:.2f}".format(maximum_absolute_difference))

    are_identical = torch.allclose(output_converted_model, output_original_model, atol=1e-3)
    if are_identical:
        logging.info("all weights are equal up to 1e-3")
    else:
        raise ValueError("the weights are different. The new model is likely different from the original one.")

    # The model has been saved with torch.save(model) and this is bound to the exact
    # directory structure. We save the state_dict instead.
    logging.info("saving the model's state dictionary")
    torch.save(
        new_model.state_dict(), "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin"
    )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--bertabs_checkpoint_path''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch dump.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the output PyTorch model.''',
)
    args = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
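# Example invocation (illustrative; paths are placeholders):
#   python convert_bertabs_original_pytorch_checkpoint.py \
#       --bertabs_checkpoint_path ./bertabs_cnndm.pt \
#       --pytorch_dump_folder_path ./bertabs-converted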
| 105
|
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
logger = logging.get_logger(__name__)

DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)
def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    """Interleave several datasets (sources) into a single dataset."""
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets.")
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
def concatenate_datasets(
    dsets: List[DatasetType],
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    axis: int = 0,
) -> DatasetType:
    """Converts a list of `Dataset` objects with the same schema into a single `Dataset`."""
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets.")
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
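# Illustrative usage with two in-memory datasets (not part of this module):
#   from datasets import Dataset, interleave_datasets
#   d1 = Dataset.from_dict({"a": [0, 1, 2]})
#   d2 = Dataset.from_dict({"a": [10, 11, 12]})
#   mixed = interleave_datasets([d1, d2], probabilities=[0.5, 0.5], seed=42)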
| 570
| 0
|
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler, num_steps=10):
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs


def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)

                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs
@require_torch
class OptimizationTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def test_adam_w(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2e-1, weight_decay=0.0)
        for _ in range(100):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)

    def test_adafactor(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w],
            lr=1e-2,
            eps=(1e-30, 1e-3),
            clip_threshold=1.0,
            decay_rate=-0.8,
            beta1=None,
            weight_decay=0.0,
            relative_step=False,
            scale_parameter=False,
            warmup_init=False,
        )
        for _ in range(1000):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)
@require_torch
class ScheduleInitTest(unittest.TestCase):
    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10

    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)

    def test_schedulers(self):
        common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10}
        # schedulers dict format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
            get_constant_schedule: ({}, [10.0] * self.num_steps),
            get_constant_schedule_with_warmup: (
                {"num_warmup_steps": 4},
                [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
            ),
            get_linear_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
            ),
            get_cosine_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
            ),
            get_cosine_with_hard_restarts_schedule_with_warmup: (
                {**common_kwargs, "num_cycles": 2},
                [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
            ),
            get_polynomial_decay_schedule_with_warmup: (
                {**common_kwargs, "power": 2.0, "lr_end": 1e-7},
                [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
            ),
            get_inverse_sqrt_schedule: (
                {"num_warmup_steps": 2},
                [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
            ),
        }

        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data

            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1,
                expected_learning_rates,
                tol=1e-2,
                msg=f"failed for {scheduler_func} in normal scheduler",
            )

            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=f"failed for {scheduler_func} in save and reload")


class LambdaScheduleWrapper:
    """Callable wrapper that keeps LambdaLR schedules picklable in the tests above."""

    def __init__(self, fn):
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

    @classmethod
    def wrap_scheduler(cls, scheduler):
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))
| 70
|
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prim_successful_result():
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    adjancency = defaultdict(list)
    for node1, node2, cost in edges:
        adjancency[node1].append([node2, cost])
        adjancency[node2].append([node1, cost])

    result = mst(adjancency)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
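# Illustrative driver so the check can be run directly (pytest would normally
# collect the test function above; this block is not part of the original file):
if __name__ == "__main__":
    test_prim_successful_result()
    print("Prim MST test passed")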
| 70
| 1
|
'''simple docstring'''
def get_1s_count(number: int) -> int:
    """
    Count the number of set bits (1s) in a non-negative integer using
    Brian Kernighan's trick (`n &= n - 1` clears the lowest set bit).

    >>> get_1s_count(25)  # 25 = 0b11001
    3
    >>> get_1s_count(0)
    0
    """
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")

    count = 0
    while number:
        # This way we arrive at next set bit (next 1) instead of looping
        # through each bit and checking for 1s hence the
        # loop won't run 32 times it will only run the number of `1` times
        number &= number - 1
        count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 262
|
'''simple docstring'''
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(".")
def get_module_path(test_file):
    """Return the module path of a model test file."""
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"{test_file} instead."
        )
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead."
        )

    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)

    return test_module_path


def get_test_module(test_file):
    """Get the module of a model test file."""
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)

    return test_module


def get_tester_classes(test_file):
    """Get all classes in a model test file whose names end with `ModelTester`."""
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))

    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_classes(test_file):
    """Get all test classes in a model test file that have non-empty `all_model_classes`."""
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        test_class = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(test_class, "all_model_classes", [])
        if len(model_classes) > 0:
            test_classes.append(test_class)

    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)


def get_model_classes(test_file):
    """Get all model classes that appear in `all_model_classes` of the test classes in `test_file`."""
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)

    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)


def get_model_tester_from_test_class(test_class):
    """Get the model tester class of a model test class."""
    test = test_class()
    if hasattr(test, "setUp"):
        test.setUp()

    model_tester = None
    if hasattr(test, "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__

    return model_tester


def get_test_classes_for_model(test_file, model_class):
    """Get all test classes in `test_file` that have `model_class` in their `all_model_classes`."""
    test_classes = get_test_classes(test_file)

    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)

    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)


def get_tester_classes_for_model(test_file, model_class):
    """Get all model tester classes in `test_file` that are associated to `model_class`."""
    test_classes = get_test_classes_for_model(test_file, model_class)

    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)

    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_to_tester_mapping(test_file):
    """Get a mapping from test classes to model tester classes in `test_file`."""
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping


def get_model_to_test_mapping(test_file):
    """Get a mapping from model classes to test classes in `test_file`."""
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping


def get_model_to_tester_mapping(test_file):
    """Get a mapping from model classes to model tester classes in `test_file`."""
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping


def to_json(o):
    """Make the information succinct and easy to read: replace class objects by their names."""
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
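# Illustrative usage (the test file path is a placeholder):
#   test_file = "tests/models/bert/test_modeling_bert.py"
#   print(to_json(get_model_to_tester_mapping(test_file)))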
| 262
| 1
|
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)
def string_to_bool(v) -> bool:
    """Parse a truthy/falsy string into a `bool` (used as an argparse `type`)."""
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )
def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    """Map each choice's string representation back to the choice itself (for argparse `choices`)."""
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)
def HfArg(
    *,
    aliases: Union[str, List[str]] = None,
    help: str = None,
    default: Any = dataclasses.MISSING,
    default_factory: Callable[[], Any] = dataclasses.MISSING,
    metadata: dict = None,
    **kwargs,
) -> dataclasses.Field:
    """Helper around `dataclasses.field` that stores `aliases` and `help` in the field metadata."""
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help

    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
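

# Example of `HfArg` in a dataclass (a sketch; `RunArgs` is a made-up name):
#
#   @dataclasses.dataclass
#   class RunArgs:
#       lr: float = HfArg(default=1e-3, aliases=["--learning-rate"], help="step size")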
class HfArgumentParser(ArgumentParser):
    """An `ArgumentParser` that derives its arguments from the type annotations of one or more dataclasses."""

    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)
    @staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default"
            )

        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(field.type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'."
                )
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]

            kwargs["type"] = make_choice_type_function(kwargs["choices"])

            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the correct kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)

            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)
    def _add_dataclass_arguments(self, dtype: DataClassType):
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self

        try:
            type_hints: Dict[str, type] = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)"
            )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions that lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`."
                ) from ex
            raise

        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)
    def parse_args_into_dataclasses(
        self, args=None, return_remaining_strings=False, look_for_args_file=True, args_filename=None, args_file_flag=None
    ) -> Tuple[DataClass, ...]:
        """Parse command-line args (plus optional `.args` files) into instances of the dataclass types."""
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []

            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))

            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")

                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)

                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])

            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()

            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]

        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")

            return (*outputs,)
    def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        """Fill the dataclass types from a plain dict instead of the command line."""
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)
    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        """Load a JSON file and fill the dataclass types from its contents."""
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        """Load a YAML file and fill the dataclass types from its contents."""
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
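

# Illustrative usage (a sketch, not part of the original module): `TrainingConfig`
# below is a made-up dataclass, shown only to demonstrate the parser.
#
#   @dataclasses.dataclass
#   class TrainingConfig:
#       learning_rate: float = 3e-4
#       fp16: bool = False
#
#   parser = HfArgumentParser(TrainingConfig)
#   (cfg,) = parser.parse_args_into_dataclasses(["--learning_rate", "1e-4", "--fp16", "true"])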
| 721
|
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
    SecondaryLearner,
    collect_objective_set,
    compute_perplexity,
    generate_datasets,
    load_gpt2,
    recopy_gpt2,
    set_seed,
    train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPT2LMHeadModel
def generate_n_pairs(
    context_len=32,
    max_steps=10,
    size_objective_set=100,
    min_len=1026,
    trim=True,
    data_file="data/tokenized_stories_train_wikitext103.jbl",
    igf_data_file="igf_context_pairs.jbl",
):
    """Collect (context, IG(X)) pairs for training the secondary learner."""
    set_seed(3)
    # generate train_data and objective_set
    train_data, objective_set = generate_datasets(
        context_len, data_file, number=size_objective_set, min_len=1026, trim=trim
    )
    # keeps model same across runs
    set_seed(4)
    # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
    # can we train on GPU?
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # load pretrained model
    model = load_gpt2("gpt2").to(device)
    print("computing perplexity on objective set")
    orig_perp = compute_perplexity(model, objective_set, context_len).item()
    print("perplexity on objective set:", orig_perp)
    # collect igf pairs and save to file demo.jbl
    collect_objective_set(model, orig_perp, context_len, train_data, objective_set, max_steps, device, igf_data_file)
    # clean up, delete model and data we don't need anymore
    del model, train_data, objective_set
    torch.cuda.empty_cache()
def training_secondary_learner(
    secondary_learner_train_data,
    secondary_learner_max_epochs=15,
    secondary_learner_batch_size=128,
    eval_freq=100,
    igf_model_path="igf_model.pt",
):
    """Train the secondary learner (the IGF model) on the collected (context, IG(X)) pairs."""
    set_seed(42)
    # Load pre-trained model
    model = GPT2LMHeadModel.from_pretrained("gpt2")
    # Initialize secondary learner to use embedding weights of model
    secondary_learner = SecondaryLearner(model)
    # Train secondary learner
    secondary_learner = train_secondary_learner(
        secondary_learner,
        secondary_learner_train_data,
        max_epochs=secondary_learner_max_epochs,
        batch_size=secondary_learner_batch_size,
        eval_freq=100,
        igf_model_path=igf_model_path,
    )
    del model, secondary_learner_train_data
    torch.cuda.empty_cache()
    return secondary_learner
def finetune(
    model,
    train_dataset,
    test_dataset,
    context_len=32,
    max_steps=1000,
    batch_size=16,
    threshold=1.0,
    recopy_model=recopy_gpt2,
    secondary_learner=None,
    eval_interval=10,
    finetuned_model_name="gpt2_finetuned.pt",
):
    """Fine-tune GPT-2, optionally filtering batches with the secondary learner (IGF)."""
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    train_sampler = RandomSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler)
    num_train_epochs = max_steps // (len(train_dataset)) + 1
    global_step = 0
    context = torch.zeros((1, context_len), dtype=torch.long, device=device)
    model, lm_optimizer, lm_scheduler = recopy_model(model, device, max_steps)
    model.train()
    if secondary_learner is not None:
        secondary_learner.to(device)
        secondary_learner.eval()
    contexts = []
    examples = 0
    observed_qs = []
    test_perps = []
    # Compute the performance of the transformer model at the beginning
    real_perp = compute_perplexity(model, test_dataset, context_len)
    test_perps.append(real_perp)
    print("Test perplexity, step", global_step, ":", real_perp)
    for epoch in range(int(num_train_epochs)):
        for step, example in enumerate(train_dataloader):
            torch.cuda.empty_cache()
            start = random.randint(0, example.size(2) - context_len - 1)
            context = example[0, 0, start : start + context_len]
            lm_optimizer.zero_grad()
            outputs = model(context, labels=context)
            do_backprop = True
            if secondary_learner is not None:
                predicted_q = secondary_learner.forward(
                    torch.tensor(context, dtype=torch.long, device=device).unsqueeze(0)
                )[0].item()
                observed_qs.append(float(predicted_q))
                # Here we implement the simple non-constant threshold for the predicted IG(X) value
                # We will decay the selectivity of our secondary learner filter from
                # 1 standard deviation above average to 1 below average after 10 batches.
                if global_step == 10:
                    threshold = -1
                if predicted_q < threshold:
                    do_backprop = False
            # If we passed the filter, add the context to the batch!
            if do_backprop:
                contexts.append(np.array(context.cpu()))
                lm_loss = outputs[0]
                lm_loss.backward()
                examples += 1
            del outputs
            # Once the batch is filled with enough contexts, backprop on the batch.
            if examples == batch_size:
                torch.cuda.empty_cache()
                examples = 0
                # Do LM backprop
                torch.nn.utils.clip_grad_norm_(model.parameters(), 3.0)
                lm_optimizer.step()
                lm_scheduler.step()  # Update learning rate schedule
                global_step += 1
                # Compute the performance of the transformer model at this batch
                if global_step % eval_interval == 0:
                    real_perp = compute_perplexity(model, test_dataset, context_len)
                    test_perps.append(real_perp)
                    print("Test perplexity, step", global_step, ":", real_perp)
            # Break out of the loop after 60 batches
            if max_steps > 0 and global_step > 60:
                break
        if max_steps > 0 and global_step > 60:
            break
    # save finetuned transformer model
    torch.save(model.state_dict(), finetuned_model_name)
    torch.cuda.empty_cache()
    # Do some cleaning up so we can reinitialize for the next run of this function
    del lm_optimizer
    del lm_scheduler
    return model
def main():
    parser = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task")
    # Required parameters
    parser.add_argument("--data_dir", default=None, type=str, required=True,
                        help="The input data dir. Should contain data files for WikiText.")
    parser.add_argument("--model_name_or_path", default=None, type=str, required=True,
                        help="Path to pretrained model or model identifier from huggingface.co/models")
    parser.add_argument("--data_file", type=str, default=None,
                        help=("A jbl file containing tokenized data which can be split as objective dataset, "
                              "train_dataset and test_dataset."))
    parser.add_argument("--igf_data_file", type=str, default=None,
                        help="A jbl file containing the context and information gain pairs to train secondary learner.")
    parser.add_argument("--output_dir", default=None, type=str, required=True,
                        help="The output directory where the final fine-tuned model is stored.")
    parser.add_argument("--tokenizer_name", default=None, type=str,
                        help="Pretrained tokenizer name or path if not the same as model_name")
    parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
    parser.add_argument("--context_len", default=32, type=int,
                        help=("The maximum total input sequence length after tokenization. Sequences longer "
                              "than this will be truncated, sequences shorter will be padded."))
    parser.add_argument("--size_objective_set", default=100, type=int,
                        help="number of articles that are long enough to be used as our objective set")
    parser.add_argument("--eval_freq", default=100, type=int,
                        help="secondary model evaluation is triggered at eval_freq")
    parser.add_argument("--max_steps", default=1000, type=int, help="To calculate training epochs")
    parser.add_argument("--secondary_learner_batch_size", default=128, type=int,
                        help="batch size of training data for secondary learner")
    parser.add_argument("--batch_size", default=16, type=int,
                        help="batch size of training data of language model(gpt2) ")
    parser.add_argument("--eval_interval", default=10, type=int,
                        help=("decay the selectivity of our secondary learner filter from "
                              "1 standard deviation above average to 1 below average after 10 batches"))
    parser.add_argument("--number", default=100, type=int,
                        help="The number of examples split to be used as objective_set/test_data")
    parser.add_argument("--min_len", default=1026, type=int,
                        help="The minimum length of the article to be used as objective set")
    parser.add_argument("--secondary_learner_max_epochs", default=15, type=int,
                        help="number of epochs to train secondary learner")
    parser.add_argument("--trim", default=True, type=bool,
                        help="truncate the example if it exceeds context length")
    parser.add_argument("--threshold", default=1.0, type=float,
                        help=("The threshold value used by secondary learner to filter the train_data and allow only "
                              "informative data as input to the model"))
    parser.add_argument("--finetuned_model_name", default="gpt2_finetuned.pt", type=str, help="finetuned_model_name")
    parser.add_argument("--recopy_model", default=recopy_gpt2, type=str,
                        help="Reset the model to the original pretrained GPT-2 weights after each iteration")

    # function calls
    # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
    generate_n_pairs(
        context_len=32,
        max_steps=10,
        size_objective_set=100,
        min_len=1026,
        trim=True,
        data_file="data/tokenized_stories_train_wikitext103.jbl",
        igf_data_file="igf_context_pairs.jbl",
    )

    # Load train data for secondary learner
    secondary_learner_train_data = joblib.load("data/IGF_values.jbl")

    # Train secondary learner
    secondary_learner = training_secondary_learner(
        secondary_learner_train_data,
        secondary_learner_max_epochs=15,
        secondary_learner_batch_size=128,
        eval_freq=100,
        igf_model_path="igf_model.pt",
    )

    # load pretrained gpt2 model
    model = GPT2LMHeadModel.from_pretrained("gpt2")
    set_seed(42)

    # Generate train and test data to train and evaluate gpt2 model
    train_dataset, test_dataset = generate_datasets(
        context_len=32, file="data/tokenized_stories_train_wikitext103.jbl", number=100, min_len=1026, trim=True
    )

    # fine-tuning of the gpt2 model using igf (Information Gain Filtration)
    finetune(
        model,
        train_dataset,
        test_dataset,
        context_len=32,
        max_steps=1000,
        batch_size=16,
        threshold=1.0,
        recopy_model=recopy_gpt2,
        secondary_learner=secondary_learner,
        eval_interval=10,
        finetuned_model_name="gpt2_finetuned.pt",
    )
if __name__ == "__main__":
main()
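

# Illustrative invocation (a sketch; the script name is an assumption and the default
# .jbl data files referenced above must already exist locally):
#
#   python run_clm_igf.py --data_dir ./data --model_name_or_path gpt2 --output_dir ./out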
| 380
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
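# `_import_structure` maps submodule names to their public objects; `_LazyModule`
# (installed at the bottom of this file) uses it to defer the heavy torch/TF imports
# until an attribute is first accessed.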
_import_structure = {
    "configuration_lxmert": ["LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LxmertConfig"],
    "tokenization_lxmert": ["LxmertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_lxmert_fast"] = ["LxmertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_lxmert"] = [
"LxmertEncoder",
"LxmertForPreTraining",
"LxmertForQuestionAnswering",
"LxmertModel",
"LxmertPreTrainedModel",
"LxmertVisualFeatureEncoder",
"LxmertXLayer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_lxmert"] = [
"TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFLxmertForPreTraining",
"TFLxmertMainLayer",
"TFLxmertModel",
"TFLxmertPreTrainedModel",
"TFLxmertVisualFeatureEncoder",
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 284
|
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    """Export a PyTorch `BertModel` state dict to an original-TensorFlow checkpoint."""
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))
def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)
if __name__ == "__main__":
main()
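

# Illustrative invocation (a sketch; the script filename and checkpoint path are
# assumptions about your local layout):
#
#   python convert_bert_pytorch_checkpoint_to_original_tf.py \
#       --model_name bert-base-uncased \
#       --pytorch_model_path ./pytorch_model.bin \
#       --tf_cache_dir ./tf_ckpt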
| 140
| 0
|
from __future__ import annotations
def ceil_index(v, l, r, key):  # noqa: E741
    """Binary-search for the smallest index in v[l..r] whose value is >= key."""
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r
def longest_increasing_subsequence_length(v: list[int]) -> int:
    """Return the length of the longest strictly increasing subsequence of v in O(n log n)."""
    if len(v) == 0:
        return 0
    tail = [0] * len(v)
    length = 1
    tail[0] = v[0]
    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # new smallest value
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # v[i] extends the longest subsequence found so far
            tail[length] = v[i]
            length += 1
        else:
            # v[i] replaces the ceiling of v[i] in tail
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]
    return length
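

# Example (patience-sorting intuition): the longest increasing subsequence of
# [2, 5, 3, 7, 11, 8, 10, 13, 6] is [2, 3, 7, 8, 10, 13], so:
#
#   assert longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6]) == 6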
if __name__ == "__main__":
import doctest
doctest.testmod()
| 703
|
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class OpenAIGPTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tests for the slow and fast OpenAI GPT tokenizers."""

    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    def setUp(self):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_lowerCAmelCase :Dict = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
_lowerCAmelCase :Optional[int] = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) )
_lowerCAmelCase :Union[str, Any] = ['#version: 0.2', 'l o', 'lo w', 'e r</w>', '']
_lowerCAmelCase :Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
_lowerCAmelCase :Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' ) as fp:
fp.write(json.dumps(_UpperCAmelCase ) )
with open(self.merges_file , 'w' ) as fp:
fp.write('\n'.join(_UpperCAmelCase ) )
    def get_input_output_texts(self, tokenizer):
return "lower newer", "lower newer"
    def test_full_tokenizer(self):
_lowerCAmelCase :Union[str, Any] = OpenAIGPTTokenizer(self.vocab_file , self.merges_file )
_lowerCAmelCase :Any = 'lower'
_lowerCAmelCase :Optional[int] = ['low', 'er</w>']
_lowerCAmelCase :int = tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase :str = tokens + ['<unk>']
_lowerCAmelCase :Union[str, Any] = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , _UpperCAmelCase )
    def test_padding(self, max_length=15):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_lowerCAmelCase :List[str] = self.rust_tokenizer_class.from_pretrained(_UpperCAmelCase , **_UpperCAmelCase )
# Simple input
_lowerCAmelCase :List[Any] = 'This is a simple input'
_lowerCAmelCase :Any = ['This is a simple input 1', 'This is a simple input 2']
_lowerCAmelCase :int = ('This is a simple input', 'This is a pair')
_lowerCAmelCase :Dict = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
self.assertRaises(_UpperCAmelCase , tokenizer_r.encode , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='max_length' )
# Simple input
self.assertRaises(_UpperCAmelCase , tokenizer_r.encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='max_length' )
# Simple input
self.assertRaises(
_UpperCAmelCase , tokenizer_r.batch_encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='max_length' , )
# Pair input
self.assertRaises(_UpperCAmelCase , tokenizer_r.encode , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='max_length' )
# Pair input
self.assertRaises(_UpperCAmelCase , tokenizer_r.encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='max_length' )
# Pair input
self.assertRaises(
_UpperCAmelCase , tokenizer_r.batch_encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='max_length' , )
    def test_padding_different_model(self):
pass
@require_ftfy
@require_spacy
@require_tokenizers
class OpenAIGPTTokenizationTestWithSpacy(OpenAIGPTTokenizationTest):
"""simple docstring"""
pass
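

# BPE walk-through for the toy vocab above (illustrative): "lower" is split into the
# symbols l o w e r</w>; applying the merges in rank order gives "l o" -> "lo",
# "lo w" -> "low" and "e r</w>" -> "er</w>", i.e. exactly the tokens ["low", "er</w>"]
# checked by test_full_tokenizer.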
| 382
| 0
|
'''simple docstring'''
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version(""">=""", FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
logger = get_logger(__name__)
def save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0):
os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE )
with FSDP.state_dict_type(
__SCREAMING_SNAKE_CASE , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
snake_case : Optional[int] = model.state_dict()
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
snake_case : Dict = F'''{MODEL_NAME}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}.bin'''
snake_case : int = os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if accelerator.process_index == 0:
logger.info(F'''Saving model to {output_model_file}''' )
torch.save(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
logger.info(F'''Model saved to {output_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
snake_case : List[Any] = (
F'''{MODEL_NAME}_rank{accelerator.process_index}.bin'''
if model_index == 0
else F'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'''
)
snake_case : List[Any] = os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
logger.info(F'''Saving model to {output_model_file}''' )
torch.save(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
logger.info(F'''Model saved to {output_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
snake_case : int = os.path.join(__SCREAMING_SNAKE_CASE , F'''{MODEL_NAME}_{model_index}''' )
os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE )
logger.info(F'''Saving model to {ckpt_dir}''' )
snake_case : Any = {'model': state_dict}
dist_cp.save_state_dict(
state_dict=__SCREAMING_SNAKE_CASE , storage_writer=dist_cp.FileSystemWriter(__SCREAMING_SNAKE_CASE ) , planner=DefaultSavePlanner() , )
logger.info(F'''Model saved to {ckpt_dir}''' )
def load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0):
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
__SCREAMING_SNAKE_CASE , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if type(__SCREAMING_SNAKE_CASE ) != FSDP and accelerator.process_index != 0:
if not fsdp_plugin.sync_module_states:
raise ValueError(
"""Set the `sync_module_states` flag to `True` so that model states are synced across processes when """
"""initializing FSDP object""" )
return
snake_case : List[str] = F'''{MODEL_NAME}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}.bin'''
snake_case : Optional[int] = os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
logger.info(F'''Loading model from {input_model_file}''' )
snake_case : Any = torch.load(__SCREAMING_SNAKE_CASE )
logger.info(F'''Model loaded from {input_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
snake_case : List[str] = (
F'''{MODEL_NAME}_rank{accelerator.process_index}.bin'''
if model_index == 0
else F'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'''
)
snake_case : Optional[int] = os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
logger.info(F'''Loading model from {input_model_file}''' )
snake_case : Optional[int] = torch.load(__SCREAMING_SNAKE_CASE )
logger.info(F'''Model loaded from {input_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
snake_case : List[Any] = (
os.path.join(__SCREAMING_SNAKE_CASE , F'''{MODEL_NAME}_{model_index}''' )
if F'''{MODEL_NAME}''' not in input_dir
else input_dir
)
logger.info(F'''Loading model from {ckpt_dir}''' )
snake_case : Any = {'model': model.state_dict()}
dist_cp.load_state_dict(
state_dict=__SCREAMING_SNAKE_CASE , storage_reader=dist_cp.FileSystemReader(__SCREAMING_SNAKE_CASE ) , planner=DefaultLoadPlanner() , )
snake_case : List[str] = state_dict['model']
logger.info(F'''Model loaded from {ckpt_dir}''' )
model.load_state_dict(__SCREAMING_SNAKE_CASE )
def save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0):
os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE )
with FSDP.state_dict_type(
__SCREAMING_SNAKE_CASE , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
snake_case : Dict = FSDP.optim_state_dict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if accelerator.process_index == 0:
snake_case : Tuple = (
F'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else F'''{OPTIMIZER_NAME}_{optimizer_index}.bin'''
)
snake_case : List[Any] = os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
logger.info(F'''Saving Optimizer state to {output_optimizer_file}''' )
torch.save(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
logger.info(F'''Optimizer state saved in {output_optimizer_file}''' )
else:
snake_case : Union[str, Any] = os.path.join(__SCREAMING_SNAKE_CASE , F'''{OPTIMIZER_NAME}_{optimizer_index}''' )
os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE )
logger.info(F'''Saving Optimizer state to {ckpt_dir}''' )
dist_cp.save_state_dict(
state_dict={"""optimizer""": optim_state} , storage_writer=dist_cp.FileSystemWriter(__SCREAMING_SNAKE_CASE ) , planner=DefaultSavePlanner() , )
logger.info(F'''Optimizer state saved in {ckpt_dir}''' )
def load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir, optimizer_index=0):
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
__SCREAMING_SNAKE_CASE , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
snake_case : Optional[Any] = None
            # below check should work but currently it isn't working (mostly a PyTorch issue);
# in the meantime disabling it at the cost of excess memory usage
# if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
snake_case : int = (
F'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else F'''{OPTIMIZER_NAME}_{optimizer_index}.bin'''
)
snake_case : Tuple = os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
logger.info(F'''Loading Optimizer state from {input_optimizer_file}''' )
snake_case : Union[str, Any] = torch.load(__SCREAMING_SNAKE_CASE )
logger.info(F'''Optimizer state loaded from {input_optimizer_file}''' )
else:
snake_case : Union[str, Any] = (
os.path.join(__SCREAMING_SNAKE_CASE , F'''{OPTIMIZER_NAME}_{optimizer_index}''' )
if F'''{OPTIMIZER_NAME}''' not in input_dir
else input_dir
)
logger.info(F'''Loading Optimizer from {ckpt_dir}''' )
snake_case : List[str] = load_sharded_optimizer_state_dict(
model_state_dict=model.state_dict() , optimizer_key="""optimizer""" , storage_reader=dist_cp.FileSystemReader(__SCREAMING_SNAKE_CASE ) , )
snake_case : Optional[Any] = optim_state['optimizer']
logger.info(F'''Optimizer loaded from {ckpt_dir}''' )
snake_case : Any = FSDP.optim_state_dict_to_load(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
optimizer.load_state_dict(__SCREAMING_SNAKE_CASE )
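

# Which branch runs in the helpers above is decided by `fsdp_plugin.state_dict_type`:
# FULL_STATE_DICT gathers a single rank-0 file, LOCAL_STATE_DICT writes one file per
# rank, and SHARDED_STATE_DICT goes through `torch.distributed.checkpoint`. A sketch of
# a save call (the plugin/accelerator objects are assumed to come from `accelerate`):
#
#   save_fsdp_model(accelerator.state.fsdp_plugin, accelerator, model, "ckpt_dir")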
| 116
|
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskFormerModelTester:
'''simple docstring'''
def __init__( self , UpperCamelCase , UpperCamelCase=2 , UpperCamelCase=True , UpperCamelCase=False , UpperCamelCase=10 , UpperCamelCase=3 , UpperCamelCase=32 * 4 , UpperCamelCase=32 * 6 , UpperCamelCase=4 , UpperCamelCase=32 , ) -> str:
UpperCamelCase__ : int = parent
UpperCamelCase__ : Union[str, Any] = batch_size
UpperCamelCase__ : Dict = is_training
UpperCamelCase__ : Optional[int] = use_auxiliary_loss
UpperCamelCase__ : List[str] = num_queries
UpperCamelCase__ : List[Any] = num_channels
UpperCamelCase__ : str = min_size
UpperCamelCase__ : Union[str, Any] = max_size
UpperCamelCase__ : int = num_labels
UpperCamelCase__ : int = mask_feature_size
def lowerCAmelCase__ ( self) -> Union[str, Any]:
UpperCamelCase__ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
UpperCamelCase)
UpperCamelCase__ : Union[str, Any] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=UpperCamelCase)
UpperCamelCase__ : Tuple = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=UpperCamelCase) > 0.5
).float()
UpperCamelCase__ : Optional[Any] = (torch.rand((self.batch_size, self.num_labels) , device=UpperCamelCase) > 0.5).long()
UpperCamelCase__ : Dict = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def lowerCAmelCase__ ( self) -> int:
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=1_28 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def lowerCAmelCase__ ( self) -> List[Any]:
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ : List[Any] = self.prepare_config_and_inputs()
UpperCamelCase__ : Optional[int] = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
return config, inputs_dict
def lowerCAmelCase__ ( self , UpperCamelCase , UpperCamelCase) -> str:
UpperCamelCase__ : int = output.encoder_hidden_states
UpperCamelCase__ : Union[str, Any] = output.pixel_decoder_hidden_states
UpperCamelCase__ : Optional[Any] = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(UpperCamelCase) , len(config.backbone_config.depths))
self.parent.assertTrue(len(UpperCamelCase) , len(config.backbone_config.depths))
self.parent.assertTrue(len(UpperCamelCase) , config.decoder_config.decoder_layers)
def lowerCAmelCase__ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase=False) -> List[str]:
with torch.no_grad():
UpperCamelCase__ : List[str] = MaskFormerModel(config=UpperCamelCase)
model.to(UpperCamelCase)
model.eval()
UpperCamelCase__ : Union[str, Any] = model(pixel_values=UpperCamelCase , pixel_mask=UpperCamelCase)
UpperCamelCase__ : Any = model(UpperCamelCase , output_hidden_states=UpperCamelCase)
        # the correct shape of output.transformer_decoder_hidden_states ensures the correctness of the
        # encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
self.parent.assertTrue(output.encoder_last_hidden_state is not None)
if output_hidden_states:
self.check_output_hidden_state(UpperCamelCase , UpperCamelCase)
def lowerCAmelCase__ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase) -> List[str]:
UpperCamelCase__ : Tuple = MaskFormerForInstanceSegmentation(config=UpperCamelCase)
model.to(UpperCamelCase)
model.eval()
def comm_check_on_output(UpperCamelCase):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
self.parent.assertTrue(result.encoder_last_hidden_state is not None)
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1))
with torch.no_grad():
UpperCamelCase__ : Any = model(pixel_values=UpperCamelCase , pixel_mask=UpperCamelCase)
UpperCamelCase__ : Optional[Any] = model(UpperCamelCase)
comm_check_on_output(UpperCamelCase)
UpperCamelCase__ : Union[str, Any] = model(
pixel_values=UpperCamelCase , pixel_mask=UpperCamelCase , mask_labels=UpperCamelCase , class_labels=UpperCamelCase)
comm_check_on_output(UpperCamelCase)
self.parent.assertTrue(result.loss is not None)
self.parent.assertEqual(result.loss.shape , torch.Size([1]))
@require_torch
class MaskFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
UpperCamelCase_ = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
UpperCamelCase_ = (
{"""feature-extraction""": MaskFormerModel, """image-segmentation""": MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
UpperCamelCase_ = False
UpperCamelCase_ = False
UpperCamelCase_ = False
UpperCamelCase_ = False
def lowerCAmelCase__ ( self) -> int:
UpperCamelCase__ : Tuple = MaskFormerModelTester(self)
UpperCamelCase__ : Dict = ConfigTester(self , config_class=UpperCamelCase , has_text_modality=UpperCamelCase)
def lowerCAmelCase__ ( self) -> Tuple:
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self) -> Any:
UpperCamelCase__ , UpperCamelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(UpperCamelCase , **UpperCamelCase , output_hidden_states=UpperCamelCase)
def lowerCAmelCase__ ( self) -> Dict:
UpperCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*UpperCamelCase)
@unittest.skip(reason='MaskFormer does not use inputs_embeds')
def lowerCAmelCase__ ( self) -> List[str]:
pass
@unittest.skip(reason='MaskFormer does not have a get_input_embeddings method')
def lowerCAmelCase__ ( self) -> Optional[Any]:
pass
@unittest.skip(reason='MaskFormer is not a generative model')
def lowerCAmelCase__ ( self) -> Any:
pass
@unittest.skip(reason='MaskFormer does not use token embeddings')
def lowerCAmelCase__ ( self) -> Optional[Any]:
pass
@require_torch_multi_gpu
@unittest.skip(
reason='MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`')
def lowerCAmelCase__ ( self) -> str:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
def lowerCAmelCase__ ( self) -> Union[str, Any]:
pass
def lowerCAmelCase__ ( self) -> str:
UpperCamelCase__ , UpperCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : Optional[Any] = model_class(UpperCamelCase)
UpperCamelCase__ : List[Any] = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ : Optional[Any] = [*signature.parameters.keys()]
UpperCamelCase__ : int = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCamelCase)
@slow
def lowerCAmelCase__ ( self) -> int:
for model_name in ["facebook/maskformer-swin-small-coco"]:
UpperCamelCase__ : Union[str, Any] = MaskFormerModel.from_pretrained(UpperCamelCase)
self.assertIsNotNone(UpperCamelCase)
def lowerCAmelCase__ ( self) -> Optional[int]:
UpperCamelCase__ : Optional[Any] = (self.model_tester.min_size,) * 2
UpperCamelCase__ : Optional[int] = {
'pixel_values': torch.randn((2, 3, *size) , device=UpperCamelCase),
'mask_labels': torch.randn((2, 10, *size) , device=UpperCamelCase),
'class_labels': torch.zeros(2 , 10 , device=UpperCamelCase).long(),
}
UpperCamelCase__ : List[Any] = MaskFormerForInstanceSegmentation(MaskFormerConfig()).to(UpperCamelCase)
UpperCamelCase__ : List[str] = model(**UpperCamelCase)
self.assertTrue(outputs.loss is not None)
def lowerCAmelCase__ ( self) -> Any:
UpperCamelCase__ , UpperCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(UpperCamelCase , **UpperCamelCase , output_hidden_states=UpperCamelCase)
def lowerCAmelCase__ ( self) -> Tuple:
UpperCamelCase__ , UpperCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : Optional[int] = model_class(UpperCamelCase).to(UpperCamelCase)
UpperCamelCase__ : List[Any] = model(**UpperCamelCase , output_attentions=UpperCamelCase)
self.assertTrue(outputs.attentions is not None)
def lowerCAmelCase__ ( self) -> int:
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
UpperCamelCase__ : Optional[Any] = self.all_model_classes[1]
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
UpperCamelCase__ : Union[str, Any] = model_class(UpperCamelCase)
model.to(UpperCamelCase)
model.train()
UpperCamelCase__ : List[str] = model(UpperCamelCase , mask_labels=UpperCamelCase , class_labels=UpperCamelCase).loss
loss.backward()
def lowerCAmelCase__ ( self) -> Optional[int]:
# only MaskFormerForInstanceSegmentation has the loss
UpperCamelCase__ : Any = self.all_model_classes[1]
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
UpperCamelCase__ : List[Any] = True
UpperCamelCase__ : int = True
UpperCamelCase__ : Optional[Any] = model_class(UpperCamelCase)
model.to(UpperCamelCase)
model.train()
UpperCamelCase__ : str = model(UpperCamelCase , mask_labels=UpperCamelCase , class_labels=UpperCamelCase)
UpperCamelCase__ : str = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
UpperCamelCase__ : Dict = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
        # we set requires_grad=True on inputs_embeds (line 2152); the original implementation doesn't
UpperCamelCase__ : Optional[int] = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
UpperCamelCase__ : Union[str, Any] = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=UpperCamelCase)
self.assertIsNotNone(encoder_hidden_states.grad)
self.assertIsNotNone(pixel_decoder_hidden_states.grad)
self.assertIsNotNone(transformer_decoder_hidden_states.grad)
self.assertIsNotNone(attentions.grad)
TOLERANCE = 1e-4
def prepare_img():
UpperCamelCase__ : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_vision
@slow
class MaskFormerModelIntegrationTest(unittest.TestCase):
'''simple docstring'''
@cached_property
def lowerCAmelCase__ ( self) -> Optional[Any]:
return (
MaskFormerImageProcessor.from_pretrained('facebook/maskformer-swin-small-coco')
if is_vision_available()
else None
)
def lowerCAmelCase__ ( self) -> Dict:
UpperCamelCase__ : str = MaskFormerModel.from_pretrained('facebook/maskformer-swin-small-coco').to(UpperCamelCase)
UpperCamelCase__ : Union[str, Any] = self.default_image_processor
UpperCamelCase__ : List[str] = prepare_img()
UpperCamelCase__ : Union[str, Any] = image_processor(UpperCamelCase , return_tensors='pt').to(UpperCamelCase)
UpperCamelCase__ : str = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
# check size
self.assertEqual(UpperCamelCase , (1, 3, 8_00, 10_88))
with torch.no_grad():
UpperCamelCase__ : int = model(**UpperCamelCase)
UpperCamelCase__ : List[Any] = torch.tensor(
[[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]]).to(UpperCamelCase)
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , UpperCamelCase , atol=UpperCamelCase))
UpperCamelCase__ : Optional[int] = torch.tensor(
[[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]]).to(UpperCamelCase)
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , UpperCamelCase , atol=UpperCamelCase))
UpperCamelCase__ : Optional[Any] = torch.tensor(
[[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]]).to(UpperCamelCase)
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , UpperCamelCase , atol=UpperCamelCase))
def lowerCAmelCase__ ( self) -> Any:
UpperCamelCase__ : List[Any] = (
MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-swin-small-coco')
.to(UpperCamelCase)
.eval()
)
UpperCamelCase__ : Dict = self.default_image_processor
UpperCamelCase__ : Optional[Any] = prepare_img()
UpperCamelCase__ : Dict = image_processor(UpperCamelCase , return_tensors='pt').to(UpperCamelCase)
UpperCamelCase__ : str = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
# check size
self.assertEqual(UpperCamelCase , (1, 3, 8_00, 10_88))
with torch.no_grad():
UpperCamelCase__ : Dict = model(**UpperCamelCase)
# masks_queries_logits
UpperCamelCase__ : int = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
UpperCamelCase__ : Optional[int] = [
[-1.373_7124, -1.772_4937, -1.936_4233],
[-1.597_7281, -1.986_7939, -2.152_3695],
[-1.579_5398, -1.926_9832, -2.09_3942],
]
UpperCamelCase__ : Tuple = torch.tensor(UpperCamelCase).to(UpperCamelCase)
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , UpperCamelCase , atol=UpperCamelCase))
# class_queries_logits
UpperCamelCase__ : Union[str, Any] = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1))
UpperCamelCase__ : Optional[Any] = torch.tensor(
[
[1.65_12E00, -5.25_72E00, -3.35_19E00],
[3.61_69E-02, -5.90_25E00, -2.93_13E00],
[1.07_66E-04, -7.76_30E00, -5.12_63E00],
]).to(UpperCamelCase)
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , UpperCamelCase , atol=UpperCamelCase))
def lowerCAmelCase__ ( self) -> Optional[Any]:
UpperCamelCase__ : Tuple = (
MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-resnet101-coco-stuff')
.to(UpperCamelCase)
.eval()
)
UpperCamelCase__ : Dict = self.default_image_processor
UpperCamelCase__ : Tuple = prepare_img()
UpperCamelCase__ : Tuple = image_processor(UpperCamelCase , return_tensors='pt').to(UpperCamelCase)
UpperCamelCase__ : List[Any] = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
# check size
self.assertEqual(UpperCamelCase , (1, 3, 8_00, 10_88))
with torch.no_grad():
UpperCamelCase__ : Optional[Any] = model(**UpperCamelCase)
# masks_queries_logits
UpperCamelCase__ : Optional[int] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
UpperCamelCase__ : Optional[Any] = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
UpperCamelCase__ : int = torch.tensor(UpperCamelCase).to(UpperCamelCase)
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , UpperCamelCase , atol=UpperCamelCase))
# class_queries_logits
UpperCamelCase__ : Optional[Any] = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1))
UpperCamelCase__ : Optional[Any] = torch.tensor(
[[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]]).to(UpperCamelCase)
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , UpperCamelCase , atol=UpperCamelCase))
def lowerCAmelCase__ ( self) -> Tuple:
UpperCamelCase__ : Union[str, Any] = (
MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-swin-small-coco')
.to(UpperCamelCase)
.eval()
)
UpperCamelCase__ : Dict = self.default_image_processor
UpperCamelCase__ : Any = image_processor(
[np.zeros((3, 8_00, 13_33)), np.zeros((3, 8_00, 13_33))] , segmentation_maps=[np.zeros((3_84, 3_84)).astype(np.floataa), np.zeros((3_84, 3_84)).astype(np.floataa)] , return_tensors='pt' , )
UpperCamelCase__ : Dict = inputs['pixel_values'].to(UpperCamelCase)
UpperCamelCase__ : Optional[int] = [el.to(UpperCamelCase) for el in inputs['mask_labels']]
UpperCamelCase__ : Optional[int] = [el.to(UpperCamelCase) for el in inputs['class_labels']]
with torch.no_grad():
UpperCamelCase__ : Optional[int] = model(**UpperCamelCase)
self.assertTrue(outputs.loss is not None)
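

# To run only these slow integration checks locally (illustrative; the test path is an
# assumption about the repo layout):
#
#   RUN_SLOW=1 pytest tests/models/maskformer/test_modeling_maskformer.py -k "Integration"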
| 410
| 0
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    """Build a `DPTConfig` (and the expected output shape) from the checkpoint URL."""
    config = DPTConfig()

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.num_labels = 150
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape
def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "patch_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")
    return name
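# Illustrative check (not part of the original script): the abs(layer_idx - 4)
# trick above reverses the refinenet numbering, because timm counts fusion
# blocks from the deepest feature map while the HF neck counts from the
# shallowest:
#
#   for timm_idx, hf_idx in [(1, 3), (2, 2), (3, 1), (4, 0)]:
#       assert abs(timm_idx - 4) == hf_idx
#   assert rename_key("scratch.refinenet4.out_conv.weight") == "neck.fusion_stage.layers.0.projection.weight"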
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
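# Minimal sketch of the qkv split above (illustrative, not part of the script):
# the fused timm projection of shape (3*hidden, hidden) is sliced into three
# (hidden, hidden) blocks in query/key/value order.
#
#   hidden = 4
#   qkv = torch.arange(3 * hidden * hidden, dtype=torch.float32).reshape(3 * hidden, hidden)
#   q, k, v = qkv[:hidden, :], qkv[hidden : 2 * hidden, :], qkv[-hidden:, :]
#   assert torch.equal(torch.cat([q, k, v], dim=0), qkv)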
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name):
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)
    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()
    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)
    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")
    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth
    # Assert logits
    expected_slice = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]])
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]])
    assert outputs.shape == torch.Size(expected_shape)
    assert (
        torch.allclose(outputs[0, 0, :3, :3], expected_slice, atol=1e-4)
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3], expected_slice)
    )
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print("Pushing model to hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt''',
type=str,
help='''URL of the original DPT checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
parser.add_argument(
'''--model_name''',
default='''dpt-large''',
type=str,
help='''Name of the model, in case you\'re pushing to the hub.''',
)
    args = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
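# Example invocation (illustrative; the default URL is the MiDaS DPT-Large
# checkpoint, and the script filename below is an assumption -- use whatever
# name this file is saved under):
#
#   python convert_dpt_to_pytorch.py \
#       --checkpoint_url https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt \
#       --pytorch_dump_folder_path ./dpt-large \
#       --model_name dpt-large \
#       --push_to_hub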
"""simple docstring"""
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class Seq2seqTrainerTester(TestCasePlus):
    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128
        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")
        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))
        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask
            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask
            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)
            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions
            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)
            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)
            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=["article", "highlights"],
        )
        train_dataset.set_format(
            type="torch", columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )
        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=["article", "highlights"],
        )
        val_dataset.set_format(
            type="torch", columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )
        output_dir = self.get_auto_remove_tmp_dir()
        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir, per_device_train_batch_size=batch_size, per_device_eval_batch_size=batch_size, predict_with_generate=True, evaluation_strategy="steps", do_train=True, do_eval=True, warmup_steps=0, eval_steps=2, logging_steps=2,
        )
        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert, args=training_args, compute_metrics=_compute_metrics, train_dataset=train_dataset, eval_dataset=val_dataset, tokenizer=tokenizer,
        )
        # start training
        trainer.train()
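# Illustrative sketch (not part of the original test): the -100 padding mask
# built in _map_to_encoder_decoder_inputs works because PyTorch's cross-entropy
# loss (which the model uses internally) ignores targets equal to its default
# ignore_index of -100:
#
#   import torch
#   logits = torch.randn(3, 10)          # 3 positions, vocab of 10
#   labels = torch.tensor([4, -100, 7])  # middle position is padding
#   loss = torch.nn.functional.cross_entropy(logits, labels)  # averages over the 2 real tokens only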
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path, openai_config_file, pytorch_dump_folder_path):
    # Construct model
    if openai_config_file == "":
        config = OpenAIGPTConfig()
    else:
        config = OpenAIGPTConfig.from_json_file(openai_config_file)
    model = OpenAIGPTModel(config)
    # Load weights from numpy
    load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path)
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--openai_checkpoint_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the TensorFlow checkpoint path.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--openai_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained OpenAI model. \n"
            "This specifies the model architecture."
        ),
    )
    args = parser.parse_args()
    convert_openai_checkpoint_to_pytorch(
        args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
    )
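# Example invocation (illustrative paths; the script filename is an assumption):
#
#   python convert_openai_original_tf_checkpoint_to_pytorch.py \
#       --openai_checkpoint_folder_path ./openai-gpt-tf \
#       --pytorch_dump_folder_path ./openai-gpt-pt
#
# When --openai_config_file is omitted, a default OpenAIGPTConfig() is used.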
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
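# Worked example of the shortest-edge resize above (illustrative): with
# size={"shortest_edge": 18} and a 30x40 (h x w) image, w > h, so the height is
# pinned to 18 and the width keeps the aspect ratio:
#
#   h, w = 30, 40
#   shortest = 18
#   expected_height, expected_width = shortest, int(shortest * w / h)  # (18, 24)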
@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"image_id": 39769, "annotations": target}
        # encode them
        image_processing = DetaImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")
        # encode them
        image_processing = DetaImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
    "tokenization_m2m_100": ["M2M100Tokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_m2m_100"] = [
        "M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
        "M2M100ForConditionalGeneration",
        "M2M100Model",
        "M2M100PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
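# Illustrative note (not part of the original file): _LazyModule makes attribute
# access drive the import, so the torch-backed modeling code is only loaded when
# first touched, e.g.
#
#   import transformers.models.m2m_100 as m2m   # cheap: nothing heavy imported yet
#   tok_cls = m2m.M2M100Tokenizer               # first access triggers the real submodule import
#
# The names that resolve lazily are exactly the keys and values of _import_structure.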
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = "cuda" if torch.cuda.is_available() else "cpu"
def split_text(text: str, n: int = 100, character: str = " ") -> List[str]:
    """Split the text every ``n``-th occurrence of ``character``"""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]


def split_documents(documents: dict) -> dict:
    """Split documents into passages"""
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}


def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    """Compute the DPR embeddings of document passages"""
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt"
    )["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def main(
    rag_example_args: "RagExampleArguments",
    processing_args: "ProcessingArguments",
    index_hnsw_args: "IndexHnswArguments",
):
    logger.info("Step 1 - Create the dataset")
    ######################################
    # The dataset needed for RAG must have three columns:
    # - title (string): title of the document
    # - text (string): text of a passage of the document
    # - embeddings (array of dimension d): DPR representation of the passage
    # Let's say you have documents in tab-separated csv files with columns "title" and "text"
    assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"
    # You can load a Dataset object this way
    dataset = load_dataset(
        "csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"]
    )
    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
    # Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)
    # And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}
    )  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer),
        batched=True,
        batch_size=processing_args.batch_size,
        features=new_features,
    )
    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset")
    dataset.save_to_disk(passages_path)
    # from datasets import load_from_disk
    # dataset = load_from_disk(passages_path)  # to reload the dataset
    ######################################
    logger.info("Step 2 - Index the dataset")
    ######################################
    # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index("embeddings", custom_index=index)
    # And save the index
    index_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss")
    dataset.get_index("embeddings").save(index_path)
    # dataset.load_faiss_index("embeddings", index_path)  # to reload the index
@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"),
        metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"},
    )
    question: Optional[str] = field(
        default=None,
        metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."},
    )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq",
        metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"},
    )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base",
        metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        },
    )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"),
        metadata={"help": "Path to a directory where the dataset passages and the index will be saved"},
    )


@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        },
    )
    batch_size: int = field(
        default=16,
        metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        },
    )


@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768,
        metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."},
    )
    m: int = field(
        default=128,
        metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        },
    )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
_import_structure = {
    "configuration_speecht5": [
        "SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP",
        "SpeechT5Config",
        "SpeechT5HifiGanConfig",
    ],
    "feature_extraction_speecht5": ["SpeechT5FeatureExtractor"],
    "processing_speecht5": ["SpeechT5Processor"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_speecht5"] = ["SpeechT5Tokenizer"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speecht5"] = [
        "SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SpeechT5ForSpeechToText",
        "SpeechT5ForSpeechToSpeech",
        "SpeechT5ForTextToSpeech",
        "SpeechT5Model",
        "SpeechT5PreTrainedModel",
        "SpeechT5HifiGan",
    ]

if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speecht5 import SpeechT5Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def make_transparency_mask(size, overlap_pixels, remove_borders=[]):
    size_x = size[0] - overlap_pixels * 2
    size_y = size[1] - overlap_pixels * 2
    for letter in ["l", "r"]:
        if letter in remove_borders:
            size_x += overlap_pixels
    for letter in ["t", "b"]:
        if letter in remove_borders:
            size_y += overlap_pixels
    mask = np.ones((size_y, size_x), dtype=np.uint8) * 255
    mask = np.pad(mask, mode="linear_ramp", pad_width=overlap_pixels, end_values=0)
    if "l" in remove_borders:
        mask = mask[:, overlap_pixels : mask.shape[1]]
    if "r" in remove_borders:
        mask = mask[:, 0 : mask.shape[1] - overlap_pixels]
    if "t" in remove_borders:
        mask = mask[overlap_pixels : mask.shape[0], :]
    if "b" in remove_borders:
        mask = mask[0 : mask.shape[0] - overlap_pixels, :]
    return mask


def clamp(n, smallest, largest):
    return max(smallest, min(n, largest))


def clamp_rect(rect: [int], min: [int], max: [int]):
    return (
        clamp(rect[0], min[0], max[0]),
        clamp(rect[1], min[1], max[1]),
        clamp(rect[2], min[0], max[0]),
        clamp(rect[3], min[1], max[1]),
    )


def add_overlap_rect(rect: [int], overlap: int, image_size: [int]):
    rect = list(rect)
    rect[0] -= overlap
    rect[1] -= overlap
    rect[2] += overlap
    rect[3] += overlap
    rect = clamp_rect(rect, [0, 0], [image_size[0], image_size[1]])
    return rect


def squeeze_tile(tile, original_image, original_slice, slice_x):
    result = Image.new("RGB", (tile.size[0] + original_slice, tile.size[1]))
    result.paste(
        original_image.resize((tile.size[0], tile.size[1]), Image.BICUBIC).crop(
            (slice_x, 0, slice_x + original_slice, tile.size[1])
        ),
        (0, 0),
    )
    result.paste(tile, (original_slice, 0))
    return result


def unsqueeze_tile(tile, original_image_slice):
    crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
    tile = tile.crop(crop_rect)
    return tile


def next_divisible(n, d):
    divisor = n % d
    return n - divisor
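# Small worked example for make_transparency_mask (illustrative): with a 6x6
# tile and overlap_pixels=2, the inner 2x2 region stays at 255 and np.pad's
# "linear_ramp" fades the 2-pixel border down toward 0, which is what lets
# neighbouring upscaled tiles cross-fade when pasted.
#
#   m = make_transparency_mask((6, 6), 2)
#   assert m.shape == (6, 6) and m[3, 3] == 255 and m[0, 0] == 0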
class StableDiffusionTiledUpscalePipeline(StableDiffusionUpscalePipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        low_res_scheduler: DDPMScheduler,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        max_noise_level: int = 350,
    ):
        super().__init__(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            max_noise_level=max_noise_level,
        )
    def _process_tile(self, original_image_slice, x, y, tile_size, tile_border, image, final_image, **kwargs):
        torch.manual_seed(0)
        crop_rect = (
            min(image.size[0] - (tile_size + original_image_slice), x * tile_size),
            min(image.size[1] - (tile_size + original_image_slice), y * tile_size),
            min(image.size[0], (x + 1) * tile_size),
            min(image.size[1], (y + 1) * tile_size),
        )
        crop_rect_with_overlap = add_overlap_rect(crop_rect, tile_border, image.size)
        tile = image.crop(crop_rect_with_overlap)
        translated_slice_x = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
        translated_slice_x = translated_slice_x - (original_image_slice / 2)
        translated_slice_x = max(0, translated_slice_x)
        to_input = squeeze_tile(tile, image, original_image_slice, translated_slice_x)
        orig_input_size = to_input.size
        to_input = to_input.resize((tile_size, tile_size), Image.BICUBIC)
        upscaled_tile = super(StableDiffusionTiledUpscalePipeline, self).__call__(image=to_input, **kwargs).images[0]
        upscaled_tile = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4), Image.BICUBIC)
        upscaled_tile = unsqueeze_tile(upscaled_tile, original_image_slice)
        upscaled_tile = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4), Image.BICUBIC)
        remove_borders = []
        if x == 0:
            remove_borders.append("l")
        elif crop_rect[2] == image.size[0]:
            remove_borders.append("r")
        if y == 0:
            remove_borders.append("t")
        elif crop_rect[3] == image.size[1]:
            remove_borders.append("b")
        transparency_mask = Image.fromarray(
            make_transparency_mask(
                (upscaled_tile.size[0], upscaled_tile.size[1]), tile_border * 4, remove_borders=remove_borders
            ),
            mode="L",
        )
        final_image.paste(
            upscaled_tile, (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4), transparency_mask
        )
@torch.no_grad()
    def __call__(
        self,
        prompt,
        image,
        num_inference_steps: int = 75,
        guidance_scale: float = 9.0,
        noise_level: int = 50,
        negative_prompt=None,
        num_images_per_prompt: int = 1,
        eta: float = 0.0,
        generator=None,
        latents=None,
        callback=None,
        callback_steps: int = 1,
        tile_size: int = 128,
        tile_border: int = 32,
        original_image_slice: int = 32,
    ):
        final_image = Image.new("RGB", (image.size[0] * 4, image.size[1] * 4))
        tcx = math.ceil(image.size[0] / tile_size)
        tcy = math.ceil(image.size[1] / tile_size)
        total_tile_count = tcx * tcy
        current_count = 0
        for y in range(tcy):
            for x in range(tcx):
                self._process_tile(
                    original_image_slice,
                    x,
                    y,
                    tile_size,
                    tile_border,
                    image,
                    final_image,
                    prompt=prompt,
                    num_inference_steps=num_inference_steps,
                    guidance_scale=guidance_scale,
                    noise_level=noise_level,
                    negative_prompt=negative_prompt,
                    num_images_per_prompt=num_images_per_prompt,
                    eta=eta,
                    generator=generator,
                    latents=latents,
                )
                current_count += 1
                if callback is not None:
                    callback({"progress": current_count / total_tile_count, "image": final_image})
        return final_image
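# Illustrative tiling math (not part of the original pipeline): a 512x768 input
# with tile_size=128 is processed as ceil(512/128) * ceil(768/128) = 4 * 6 = 24
# tiles, and the 4x-upscaled result canvas is 2048x3072.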
def main():
    # Run a demo
    model_id = "stabilityai/stable-diffusion-x4-upscaler"
    pipe = StableDiffusionTiledUpscalePipeline.from_pretrained(model_id, revision="fp16", torch_dtype=torch.float16)
    pipe = pipe.to("cuda")
    image = Image.open("../../docs/source/imgs/diffusers_library.jpg")

    def callback(obj):
        print(f"progress: {obj['progress']:.4f}")
        obj["image"].save("diffusers_library_progress.jpg")

    final_image = pipe(image=image, prompt="Black font, white background, vector", noise_level=40, callback=callback)
    final_image.save("diffusers_library.jpg")
if __name__ == "__main__":
main()
'''simple docstring'''
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''snap-research/efficientformer-l1-300''': (
'''https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json'''
),
}
class EfficientFormerConfig(PretrainedConfig):
    model_type = "efficientformer"

    def __init__(
        self,
        depths: List[int] = [3, 2, 6, 4],
        hidden_sizes: List[int] = [48, 96, 224, 448],
        downsamples: List[bool] = [True, True, True, True],
        dim: int = 448,
        key_dim: int = 32,
        attention_ratio: int = 4,
        resolution: int = 7,
        num_hidden_layers: int = 5,
        num_attention_heads: int = 8,
        mlp_expansion_ratio: int = 4,
        hidden_dropout_prob: float = 0.0,
        patch_size: int = 16,
        num_channels: int = 3,
        pool_size: int = 3,
        downsample_patch_size: int = 3,
        downsample_stride: int = 2,
        downsample_pad: int = 1,
        drop_path_rate: float = 0.0,
        num_meta3d_blocks: int = 1,
        distillation: bool = True,
        use_layer_scale: bool = True,
        layer_scale_init_value: float = 1e-5,
        hidden_act: str = "gelu",
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        image_size: int = 224,
        batch_norm_eps: float = 1e-05,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
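# Minimal usage sketch (illustrative, not part of the original file): the config
# round-trips through a plain dict like any PretrainedConfig, which is what
# save_pretrained/from_pretrained rely on.
#
#   cfg = EfficientFormerConfig(depths=[3, 2, 6, 4])
#   cfg2 = EfficientFormerConfig.from_dict(cfg.to_dict())
#   assert cfg2.depths == [3, 2, 6, 4]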
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/bigbird-roberta-base": 4096,
    "google/bigbird-roberta-large": 4096,
    "google/bigbird-base-trivia-itc": 4096,
}
class BigBirdTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
def __init__( self , _UpperCAmelCase , _UpperCAmelCase="<unk>" , _UpperCAmelCase="<s>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="<pad>" , _UpperCAmelCase="[SEP]" , _UpperCAmelCase="[MASK]" , _UpperCAmelCase="[CLS]" , _UpperCAmelCase = None , **_UpperCAmelCase , ):
'''simple docstring'''
__A : Union[str, Any] = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase) if isinstance(_UpperCAmelCase , _UpperCAmelCase) else bos_token
__A : List[Any] = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase) if isinstance(_UpperCAmelCase , _UpperCAmelCase) else eos_token
__A : Optional[int] = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase) if isinstance(_UpperCAmelCase , _UpperCAmelCase) else unk_token
__A : str = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase) if isinstance(_UpperCAmelCase , _UpperCAmelCase) else pad_token
__A : int = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase) if isinstance(_UpperCAmelCase , _UpperCAmelCase) else cls_token
__A : Optional[int] = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase) if isinstance(_UpperCAmelCase , _UpperCAmelCase) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
__A : Optional[Any] = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase) if isinstance(_UpperCAmelCase , _UpperCAmelCase) else mask_token
__A : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCAmelCase , )
__A : Tuple = vocab_file
__A : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(_UpperCAmelCase)
    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token
    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def _decode(
        self,
        token_ids: List[int],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        spaces_between_special_tokens: bool = True,
        **kwargs,
    ) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)
        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)
        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))
        # Mimic the behavior of the Rust tokenizer:
        # No space before [MASK] and [SEP]
        if spaces_between_special_tokens:
            text = re.sub(r" (\[(MASK|SEP)\])", r"\1", " ".join(sub_texts))
        else:
            text = "".join(sub_texts)
        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
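# Worked example (illustrative token ids): for a sequence pair the helpers above
# produce BERT-style inputs,
#
#   build_inputs_with_special_tokens(A, B)        -> [CLS] A [SEP] B [SEP]
#   create_token_type_ids_from_sequences(A, B)    -> 0s over [CLS] A [SEP], 1s over B [SEP]
#
# e.g. with len(A) == 2 and len(B) == 1 the token_type_ids are [0, 0, 0, 0, 1, 1].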
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        hidden_sizes=[32, 64, 128],
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2"],
        out_indices=[1, 2],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return FocalNetConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, embed_dim=self.embed_dim, hidden_sizes=self.hidden_sizes, depths=self.depths, num_heads=self.num_heads, window_size=self.window_size, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act, use_absolute_embeddings=self.use_absolute_embeddings, patch_norm=self.patch_norm, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, out_features=self.out_features, out_indices=self.out_indices,
        )
def __a ( self , _a , _a , _a ) -> str:
lowerCAmelCase_ = FocalNetModel(config=_a )
model.to(_a )
model.eval()
lowerCAmelCase_ = model(_a )
lowerCAmelCase_ = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowerCAmelCase_ = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def __a ( self , _a , _a , _a ) -> Optional[Any]:
lowerCAmelCase_ = FocalNetBackbone(config=_a )
model.to(_a )
model.eval()
lowerCAmelCase_ = model(_a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
lowerCAmelCase_ = None
lowerCAmelCase_ = FocalNetBackbone(config=_a )
model.to(_a )
model.eval()
lowerCAmelCase_ = model(_a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def __a ( self , _a , _a , _a ) -> Optional[int]:
lowerCAmelCase_ = FocalNetForMaskedImageModeling(config=_a )
model.to(_a )
model.eval()
lowerCAmelCase_ = model(_a )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowerCAmelCase_ = 1
lowerCAmelCase_ = FocalNetForMaskedImageModeling(_a )
model.to(_a )
model.eval()
lowerCAmelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCAmelCase_ = model(_a )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __a ( self , _a , _a , _a ) -> int:
lowerCAmelCase_ = self.type_sequence_label_size
lowerCAmelCase_ = FocalNetForImageClassification(_a )
model.to(_a )
model.eval()
lowerCAmelCase_ = model(_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCAmelCase_ = 1
lowerCAmelCase_ = FocalNetForImageClassification(_a )
model.to(_a )
model.eval()
lowerCAmelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCAmelCase_ = model(_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __a ( self ) -> Optional[Any]:
lowerCAmelCase_ = self.prepare_config_and_inputs()
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = config_and_inputs
lowerCAmelCase_ = {"pixel_values": pixel_values}
return config, inputs_dict
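# Sketch (values taken from the tester defaults above: image_size=32,
# patch_size=2, depths=[1, 2, 1], embed_dim=16) of the shape arithmetic used in
# the model check above:
image_size, patch_size, depths, embed_dim = 32, 2, [1, 2, 1], 16
expected_seq_len = ((image_size // patch_size) ** 2) // (4 ** (len(depths) - 1))
expected_dim = int(embed_dim * 2 ** (len(depths) - 1))
assert (expected_seq_len, expected_dim) == (16, 64)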
@require_torch
class __magic_name__ (__lowercase , __lowercase , unittest.TestCase ):
lowerCamelCase__ = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
lowerCamelCase__ = (
{'''feature-extraction''': FocalNetModel, '''image-classification''': FocalNetForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
def __a ( self ) -> Optional[int]:
lowerCAmelCase_ = FocalNetModelTester(self )
lowerCAmelCase_ = ConfigTester(self , config_class=_a , embed_dim=37 , has_text_modality=_a )
def __a ( self ) -> str:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __a ( self ) -> List[str]:
return
def __a ( self ) -> int:
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __a ( self ) -> Optional[Any]:
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_a )
def __a ( self ) -> List[Any]:
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_a )
def __a ( self ) -> List[Any]:
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@unittest.skip(reason="FocalNet does not use inputs_embeds" )
def __a ( self ) -> List[Any]:
pass
@unittest.skip(reason="FocalNet does not use feedforward chunking" )
def __a ( self ) -> Union[str, Any]:
pass
def __a ( self ) -> str:
lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
lowerCAmelCase_ = model_class(_a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCAmelCase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_a , nn.Linear ) )
def __a ( self ) -> List[Any]:
lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
lowerCAmelCase_ = model_class(_a )
lowerCAmelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase_ = [*signature.parameters.keys()]
lowerCAmelCase_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _a )
def __a ( self , _a , _a , _a , _a ) -> Any:
lowerCAmelCase_ = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
lowerCAmelCase_ = model(**self._prepare_for_class(_a , _a ) )
lowerCAmelCase_ = outputs.hidden_states
lowerCAmelCase_ = getattr(
self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_a ) , _a )
# FocalNet has a different seq_length
lowerCAmelCase_ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCAmelCase_ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
lowerCAmelCase_ = outputs.reshaped_hidden_states
self.assertEqual(len(_a ) , _a )
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = reshaped_hidden_states[0].shape
lowerCAmelCase_ = (
reshaped_hidden_states[0].view(_a , _a , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def __a ( self ) -> Any:
lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
lowerCAmelCase_ = True
self.check_hidden_states_output(_a , _a , _a , _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase_ = True
self.check_hidden_states_output(_a , _a , _a , _a )
def __a ( self ) -> Optional[int]:
lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ = 3
lowerCAmelCase_ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowerCAmelCase_ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCAmelCase_ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowerCAmelCase_ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
lowerCAmelCase_ = True
self.check_hidden_states_output(_a , _a , _a , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase_ = True
self.check_hidden_states_output(_a , _a , _a , (padded_height, padded_width) )
@slow
def __a ( self ) -> List[str]:
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase_ = FocalNetModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def __a ( self ) -> Optional[int]:
lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ = _config_zero_init(_a )
for model_class in self.all_model_classes:
lowerCAmelCase_ = model_class(config=_a )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
@require_vision
@require_torch
class __magic_name__ (unittest.TestCase ):
@cached_property
def __a ( self ) -> str:
# TODO update organization
return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny" ) if is_vision_available() else None
@slow
def __a ( self ) -> str:
lowerCAmelCase_ = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny" ).to(_a )
lowerCAmelCase_ = self.default_image_processor
lowerCAmelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
lowerCAmelCase_ = image_processor(images=_a , return_tensors="pt" ).to(_a )
# forward pass
with torch.no_grad():
lowerCAmelCase_ = model(**_a )
# verify the logits
lowerCAmelCase_ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _a )
lowerCAmelCase_ = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1E-4 ) )
        self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 281 )
@require_torch
class __magic_name__ (__lowercase , unittest.TestCase ):
lowerCamelCase__ = (FocalNetBackbone,) if is_torch_available() else ()
lowerCamelCase__ = FocalNetConfig
lowerCamelCase__ = False
def __a ( self ) -> Any:
lowerCAmelCase_ = FocalNetModelTester(self )
from ..utils import DummyObject, requires_backends
class __magic_name__ (metaclass=__lowercase ):
lowerCamelCase__ = ['''speech''']
def __init__( self , *_a , **_a ) -> str:
requires_backends(self , ["speech"] )
class __magic_name__ (metaclass=__lowercase ):
lowerCamelCase__ = ['''speech''']
def __init__( self , *_a , **_a ) -> Union[str, Any]:
requires_backends(self , ["speech"] )
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    """Build a UperNet + ConvNext config matching the given checkpoint name."""
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024
    # set label information
    num_labels = 150
    repo_id = 'huggingface/label-files'
    filename = 'ade20k-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset' ) , 'r' ) )
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    backbone_config = ConvNextConfig(
        depths=depths , hidden_sizes=hidden_sizes , out_features=['stage1', 'stage2', 'stage3', 'stage4'] )
    config = UperNetConfig(
        backbone_config=backbone_config , auxiliary_in_channels=auxiliary_in_channels , num_labels=num_labels , id2label=id2label , label2id=label2id , )
    return config
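# Usage sketch (the call downloads the ADE20K label file, so it is left commented):
#   config = get_upernet_config('upernet-convnext-tiny')
#   config.backbone_config.depths        -> [3, 3, 9, 3]
#   config.backbone_config.hidden_sizes  -> [96, 192, 384, 768]
#   config.num_labels                    -> 150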
def create_rename_keys(config):
    """Build the (mmseg_name, hf_name) rename table for the given config."""
    rename_keys = []
# fmt: off
# stem
rename_keys.append(('backbone.downsample_layers.0.0.weight', 'backbone.embeddings.patch_embeddings.weight') )
rename_keys.append(('backbone.downsample_layers.0.0.bias', 'backbone.embeddings.patch_embeddings.bias') )
rename_keys.append(('backbone.downsample_layers.0.1.weight', 'backbone.embeddings.layernorm.weight') )
rename_keys.append(('backbone.downsample_layers.0.1.bias', 'backbone.embeddings.layernorm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"""backbone.stages.{i}.{j}.gamma""", f"""backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter""") )
rename_keys.append((f"""backbone.stages.{i}.{j}.depthwise_conv.weight""", f"""backbone.encoder.stages.{i}.layers.{j}.dwconv.weight""") )
rename_keys.append((f"""backbone.stages.{i}.{j}.depthwise_conv.bias""", f"""backbone.encoder.stages.{i}.layers.{j}.dwconv.bias""") )
rename_keys.append((f"""backbone.stages.{i}.{j}.norm.weight""", f"""backbone.encoder.stages.{i}.layers.{j}.layernorm.weight""") )
rename_keys.append((f"""backbone.stages.{i}.{j}.norm.bias""", f"""backbone.encoder.stages.{i}.layers.{j}.layernorm.bias""") )
rename_keys.append((f"""backbone.stages.{i}.{j}.pointwise_conv1.weight""", f"""backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight""") )
rename_keys.append((f"""backbone.stages.{i}.{j}.pointwise_conv1.bias""", f"""backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias""") )
rename_keys.append((f"""backbone.stages.{i}.{j}.pointwise_conv2.weight""", f"""backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight""") )
rename_keys.append((f"""backbone.stages.{i}.{j}.pointwise_conv2.bias""", f"""backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias""") )
if i > 0:
rename_keys.append((f"""backbone.downsample_layers.{i}.0.weight""", f"""backbone.encoder.stages.{i}.downsampling_layer.0.weight""") )
rename_keys.append((f"""backbone.downsample_layers.{i}.0.bias""", f"""backbone.encoder.stages.{i}.downsampling_layer.0.bias""") )
rename_keys.append((f"""backbone.downsample_layers.{i}.1.weight""", f"""backbone.encoder.stages.{i}.downsampling_layer.1.weight""") )
rename_keys.append((f"""backbone.downsample_layers.{i}.1.bias""", f"""backbone.encoder.stages.{i}.downsampling_layer.1.bias""") )
rename_keys.append((f"""backbone.norm{i}.weight""", f"""backbone.hidden_states_norms.stage{i+1}.weight""") )
rename_keys.append((f"""backbone.norm{i}.bias""", f"""backbone.hidden_states_norms.stage{i+1}.bias""") )
# decode head
rename_keys.extend(
[
('decode_head.conv_seg.weight', 'decode_head.classifier.weight'),
('decode_head.conv_seg.bias', 'decode_head.classifier.bias'),
('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'),
('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias'),
] )
# fmt: on
return rename_keys
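# Sketch: preview a few generated mappings without loading any checkpoint — only
# `config.backbone_config.depths` is read, so a tiny stub object suffices here.
from types import SimpleNamespace

_stub = SimpleNamespace(backbone_config=SimpleNamespace(depths=[1, 1]))
_keys = create_rename_keys(_stub)
assert ('decode_head.conv_seg.weight', 'decode_head.classifier.weight') in _keys
assert ('backbone.stages.0.0.gamma', 'backbone.encoder.stages.0.layers.0.layer_scale_parameter') in _keys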
def rename_key(dct, old, new):
    """Pop `old` from the dict and reinsert its value under `new`."""
    val = dct.pop(old)
    dct[new] = val
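# Tiny demonstration of the pop-and-reassign helper above:
_d = {'old': 1}
rename_key(_d, 'old', 'new')
assert _d == {'new': 1}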
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    """Convert an mmsegmentation ConvNext UperNet checkpoint to the HF format."""
    model_name_to_url = {
        'upernet-convnext-tiny': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth',
        'upernet-convnext-small': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth',
        'upernet-convnext-base': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth',
        'upernet-convnext-large': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth',
        'upernet-convnext-xlarge': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth',
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu' )['state_dict']
    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()
    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace('bn' , 'batch_norm' )
        state_dict[key] = val
    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    model.load_state_dict(state_dict)
    # verify on image
    url = 'https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'
    image = Image.open(requests.get(url, stream=True ).raw ).convert('RGB' )
    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors='pt' ).pixel_values
    with torch.no_grad():
        outputs = model(pixel_values)
    if model_name == "upernet-convnext-tiny":
        expected_slice = torch.tensor(
            [[-8.81_10, -8.81_10, -8.65_21], [-8.81_10, -8.81_10, -8.65_21], [-8.77_46, -8.77_46, -8.61_30]] )
    elif model_name == "upernet-convnext-small":
        expected_slice = torch.tensor(
            [[-8.82_36, -8.82_36, -8.67_71], [-8.82_36, -8.82_36, -8.67_71], [-8.76_38, -8.76_38, -8.62_40]] )
    elif model_name == "upernet-convnext-base":
        expected_slice = torch.tensor(
            [[-8.85_58, -8.85_58, -8.69_05], [-8.85_58, -8.85_58, -8.69_05], [-8.76_69, -8.76_69, -8.60_21]] )
    elif model_name == "upernet-convnext-large":
        expected_slice = torch.tensor(
            [[-8.66_60, -8.66_60, -8.62_10], [-8.66_60, -8.66_60, -8.62_10], [-8.63_10, -8.63_10, -8.59_64]] )
    elif model_name == "upernet-convnext-xlarge":
        expected_slice = torch.tensor(
            [[-8.49_80, -8.49_80, -8.39_77], [-8.49_80, -8.49_80, -8.39_77], [-8.43_79, -8.43_79, -8.34_12]] )
    print('Logits:' , outputs.logits[0, 0, :3, :3] )
    assert torch.allclose(outputs.logits[0, 0, :3, :3] , expected_slice , atol=1E-4 )
    print('Looks ok!' )
    if pytorch_dump_folder_path is not None:
        print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"""Saving processor to {pytorch_dump_folder_path}""" )
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f"""Pushing model and processor for {model_name} to hub""" )
        model.push_to_hub(f"""openmmlab/{model_name}""" )
        processor.push_to_hub(f"""openmmlab/{model_name}""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='upernet-convnext-tiny',
type=str,
choices=[f"""upernet-convnext-{size}""" for size in ['tiny', 'small', 'base', 'large', 'xlarge']],
help='Name of the ConvNext UperNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
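# Example invocation (sketch; the script filename is illustrative):
#   python convert_convnext_upernet.py --model_name upernet-convnext-tiny \
#       --pytorch_dump_folder_path ./upernet-convnext-tiny --push_to_hub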
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class UpperCamelCase__ ( UpperCAmelCase__):
'''simple docstring'''
def A__ ( self ) ->Union[str, Any]:
UpperCAmelCase__ :List[Any] = tempfile.mkdtemp()
UpperCAmelCase__ :Union[str, Any] = 8
# DPR tok
UpperCAmelCase__ :List[Any] = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
UpperCAmelCase__ :str = os.path.join(self.tmpdirname , 'dpr_tokenizer' )
os.makedirs(A , exist_ok=A )
UpperCAmelCase__ :Tuple = os.path.join(A , DPR_VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
# BART tok
UpperCAmelCase__ :Optional[Any] = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
UpperCAmelCase__ :List[Any] = dict(zip(A , range(len(A ) ) ) )
UpperCAmelCase__ :Dict = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
UpperCAmelCase__ :Optional[Any] = {'unk_token': '<unk>'}
UpperCAmelCase__ :int = os.path.join(self.tmpdirname , 'bart_tokenizer' )
os.makedirs(A , exist_ok=A )
UpperCAmelCase__ :Any = os.path.join(A , BART_VOCAB_FILES_NAMES['vocab_file'] )
UpperCAmelCase__ :Optional[int] = os.path.join(A , BART_VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(A ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(A ) )
def A__ ( self ) ->DPRQuestionEncoderTokenizer:
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'dpr_tokenizer' ) )
def A__ ( self ) ->DPRContextEncoderTokenizer:
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'dpr_tokenizer' ) )
def A__ ( self ) ->BartTokenizer:
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'bart_tokenizer' ) )
def A__ ( self ) ->Dict:
shutil.rmtree(self.tmpdirname )
def A__ ( self ) ->Any:
UpperCAmelCase__ :int = Dataset.from_dict(
{
'id': ['0', '1'],
'text': ['foo', 'bar'],
'title': ['Foo', 'Bar'],
'embeddings': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index('embeddings' , string_factory='Flat' , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
def A__ ( self ) ->List[Any]:
UpperCAmelCase__ :Any = self.get_dummy_dataset()
UpperCAmelCase__ :Dict = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch('transformers.models.rag.retrieval_rag.load_dataset' ) as mock_load_dataset:
UpperCAmelCase__ :int = dataset
UpperCAmelCase__ :List[str] = RagRetriever(
A , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def A__ ( self , A ) ->List[Any]:
UpperCAmelCase__ :Optional[Any] = self.get_dummy_dataset()
UpperCAmelCase__ :Optional[Any] = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='custom' , )
if from_disk:
UpperCAmelCase__ :Union[str, Any] = os.path.join(self.tmpdirname , 'dataset' )
UpperCAmelCase__ :int = os.path.join(self.tmpdirname , 'index.faiss' )
dataset.get_index('embeddings' ).save(os.path.join(self.tmpdirname , 'index.faiss' ) )
dataset.drop_index('embeddings' )
dataset.save_to_disk(os.path.join(self.tmpdirname , 'dataset' ) )
del dataset
UpperCAmelCase__ :Optional[Any] = RagRetriever(
A , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
UpperCAmelCase__ :List[str] = RagRetriever(
A , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , A ) , )
return retriever
def A__ ( self ) ->str:
UpperCAmelCase__ :Union[str, Any] = Dataset.from_dict(
{
'id': ['0', '1'],
'text': ['foo', 'bar'],
'title': ['Foo', 'Bar'],
'embeddings': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index('embeddings' , string_factory='Flat' , metric_type=faiss.METRIC_INNER_PRODUCT )
UpperCAmelCase__ :Optional[int] = os.path.join(self.tmpdirname , 'hf_bert_base.hnswSQ8_correct_phi_128.c_index' )
dataset.save_faiss_index('embeddings' , index_file_name + '.index.dpr' )
pickle.dump(dataset['id'] , open(index_file_name + '.index_meta.dpr' , 'wb' ) )
UpperCAmelCase__ :int = os.path.join(self.tmpdirname , 'psgs_w100.tsv.pkl' )
UpperCAmelCase__ :List[str] = {sample['id']: [sample['text'], sample['title']] for sample in dataset}
pickle.dump(A , open(A , 'wb' ) )
UpperCAmelCase__ :List[str] = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='legacy' , index_path=self.tmpdirname , )
UpperCAmelCase__ :Dict = RagRetriever(
A , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def A__ ( self ) ->Optional[Any]:
UpperCAmelCase__ :Optional[int] = 1
UpperCAmelCase__ :Tuple = self.get_dummy_canonical_hf_index_retriever()
UpperCAmelCase__ :List[str] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ :int = retriever.retrieve(A , n_docs=A )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(A ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['embeddings', 'id', 'text', 'title'] )
self.assertEqual(len(doc_dicts[0]['id'] ) , A )
self.assertEqual(doc_dicts[0]['id'][0] , '1' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['id'][0] , '0' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def A__ ( self ) ->Dict:
UpperCAmelCase__ :List[Any] = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch('transformers.models.rag.retrieval_rag.load_dataset' ) as mock_load_dataset:
UpperCAmelCase__ :List[str] = self.get_dummy_dataset()
retriever.save_pretrained(A )
UpperCAmelCase__ :List[Any] = RagRetriever.from_pretrained(A )
self.assertIsInstance(A , A )
UpperCAmelCase__ :Optional[int] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
UpperCAmelCase__ :List[Any] = retriever.retrieve(A , n_docs=1 )
self.assertTrue(out is not None )
def A__ ( self ) ->Tuple:
UpperCAmelCase__ :Tuple = 1
UpperCAmelCase__ :Union[str, Any] = self.get_dummy_custom_hf_index_retriever(from_disk=A )
UpperCAmelCase__ :Any = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ :List[str] = retriever.retrieve(A , n_docs=A )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(A ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['embeddings', 'id', 'text', 'title'] )
self.assertEqual(len(doc_dicts[0]['id'] ) , A )
self.assertEqual(doc_dicts[0]['id'][0] , '1' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['id'][0] , '0' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def A__ ( self ) ->Tuple:
UpperCAmelCase__ :str = self.get_dummy_custom_hf_index_retriever(from_disk=A )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(A )
UpperCAmelCase__ :List[Any] = RagRetriever.from_pretrained(A )
self.assertIsInstance(A , A )
UpperCAmelCase__ :int = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
UpperCAmelCase__ :str = retriever.retrieve(A , n_docs=1 )
self.assertTrue(out is not None )
def A__ ( self ) ->Tuple:
UpperCAmelCase__ :Any = 1
UpperCAmelCase__ :Optional[Any] = self.get_dummy_custom_hf_index_retriever(from_disk=A )
UpperCAmelCase__ :Dict = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ :Any = retriever.retrieve(A , n_docs=A )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(A ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['embeddings', 'id', 'text', 'title'] )
self.assertEqual(len(doc_dicts[0]['id'] ) , A )
self.assertEqual(doc_dicts[0]['id'][0] , '1' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['id'][0] , '0' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def A__ ( self ) ->Dict:
UpperCAmelCase__ :Optional[int] = self.get_dummy_custom_hf_index_retriever(from_disk=A )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(A )
UpperCAmelCase__ :int = RagRetriever.from_pretrained(A )
self.assertIsInstance(A , A )
UpperCAmelCase__ :Optional[Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
UpperCAmelCase__ :Union[str, Any] = retriever.retrieve(A , n_docs=1 )
self.assertTrue(out is not None )
def A__ ( self ) ->List[str]:
UpperCAmelCase__ :str = 1
UpperCAmelCase__ :List[str] = self.get_dummy_legacy_index_retriever()
UpperCAmelCase__ :str = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ :Dict = retriever.retrieve(A , n_docs=A )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(A ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['text', 'title'] )
self.assertEqual(len(doc_dicts[0]['text'] ) , A )
self.assertEqual(doc_dicts[0]['text'][0] , 'bar' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['text'][0] , 'foo' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def A__ ( self ) ->List[str]:
UpperCAmelCase__ :Any = self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(A )
UpperCAmelCase__ :str = RagRetriever.from_pretrained(A )
self.assertIsInstance(A , A )
UpperCAmelCase__ :List[Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
UpperCAmelCase__ :int = retriever.retrieve(A , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def A__ ( self ) ->List[str]:
import torch
UpperCAmelCase__ :Optional[Any] = 1
UpperCAmelCase__ :Optional[int] = self.get_dummy_canonical_hf_index_retriever()
UpperCAmelCase__ :Union[str, Any] = [[5, 7], [10, 11]]
UpperCAmelCase__ :Any = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
UpperCAmelCase__ :List[str] = retriever(A , A , prefix=retriever.config.generator.prefix , n_docs=A )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ :List[Any] = (
out['context_input_ids'],
out['context_attention_mask'],
out['retrieved_doc_embeds'],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(A , A )
self.assertIsInstance(A , A )
self.assertIsInstance(A , np.ndarray )
UpperCAmelCase__ :List[str] = retriever(
A , A , prefix=retriever.config.generator.prefix , n_docs=A , return_tensors='pt' , )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ :List[str] = ( # noqa: F841
out['context_input_ids'],
out['context_attention_mask'],
out['retrieved_doc_embeds'],
out['doc_ids'],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(A , torch.Tensor )
self.assertIsInstance(A , torch.Tensor )
self.assertIsInstance(A , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def A__ ( self ) ->List[Any]:
UpperCAmelCase__ :Tuple = self.get_dpr_ctx_encoder_tokenizer()
UpperCAmelCase__ :Tuple = 1
UpperCAmelCase__ :Optional[int] = self.get_dummy_custom_hf_index_retriever(from_disk=A )
retriever.set_ctx_encoder_tokenizer(A )
UpperCAmelCase__ :Tuple = [[5, 7], [10, 11]]
UpperCAmelCase__ :Any = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
UpperCAmelCase__ :Any = retriever(A , A , prefix=retriever.config.generator.prefix , n_docs=A )
self.assertEqual(
len(A ) , 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs
self.assertEqual(
all(k in out for k in ('tokenized_doc_ids', 'tokenized_doc_attention_mask') ) , A ) # check for doc token related keys in dictionary.
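# Sketch (not part of the tests): why document '1' wins for the all-ones query
# used above — with embeddings of ones and 2*ones and the tests' vector size of
# 8, the inner products are 8 and 16, so METRIC_INNER_PRODUCT ranks doc '1' first.
import numpy as np

_query = np.ones(8, dtype=np.float32)
_doc_embeds = np.stack([np.ones(8), 2 * np.ones(8)]).astype(np.float32)
assert (_doc_embeds @ _query).argmax() == 1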
"""simple docstring"""
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class A_ ( A__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = (IPNDMScheduler,)
SCREAMING_SNAKE_CASE_ = (("""num_inference_steps""", 50),)
def UpperCAmelCase__ ( self :str , **lowerCamelCase_ :List[Any] ):
"""simple docstring"""
lowerCamelCase__ : List[Any] ={'num_train_timesteps': 1_000}
config.update(**lowerCamelCase_ )
return config
def UpperCAmelCase__ ( self :Tuple , lowerCamelCase_ :int=0 , **lowerCamelCase_ :Dict ):
"""simple docstring"""
lowerCamelCase__ : Union[str, Any] =dict(self.forward_default_kwargs )
lowerCamelCase__ : List[Any] =kwargs.pop('num_inference_steps' , lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] =self.dummy_sample
lowerCamelCase__ : List[str] =0.1 * sample
lowerCamelCase__ : Optional[int] =[residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
lowerCamelCase__ : int =self.get_scheduler_config(**lowerCamelCase_ )
lowerCamelCase__ : List[str] =scheduler_class(**lowerCamelCase_ )
scheduler.set_timesteps(lowerCamelCase_ )
# copy over dummy past residuals
lowerCamelCase__ : Optional[Any] =dummy_past_residuals[:]
if time_step is None:
lowerCamelCase__ : Any =scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] =scheduler_class.from_pretrained(lowerCamelCase_ )
new_scheduler.set_timesteps(lowerCamelCase_ )
# copy over dummy past residuals
lowerCamelCase__ : str =dummy_past_residuals[:]
lowerCamelCase__ : int =scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ).prev_sample
lowerCamelCase__ : str =new_scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
lowerCamelCase__ : List[Any] =scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ).prev_sample
lowerCamelCase__ : Any =new_scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def UpperCAmelCase__ ( self :List[Any] ):
"""simple docstring"""
pass
def UpperCAmelCase__ ( self :Union[str, Any] , lowerCamelCase_ :List[str]=0 , **lowerCamelCase_ :List[Any] ):
"""simple docstring"""
lowerCamelCase__ : Union[str, Any] =dict(self.forward_default_kwargs )
lowerCamelCase__ : Optional[Any] =kwargs.pop('num_inference_steps' , lowerCamelCase_ )
lowerCamelCase__ : List[Any] =self.dummy_sample
lowerCamelCase__ : List[str] =0.1 * sample
lowerCamelCase__ : Union[str, Any] =[residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
lowerCamelCase__ : Optional[int] =self.get_scheduler_config()
lowerCamelCase__ : Dict =scheduler_class(**lowerCamelCase_ )
scheduler.set_timesteps(lowerCamelCase_ )
# copy over dummy past residuals (must be after setting timesteps)
lowerCamelCase__ : Optional[Any] =dummy_past_residuals[:]
if time_step is None:
lowerCamelCase__ : Optional[Any] =scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] =scheduler_class.from_pretrained(lowerCamelCase_ )
# copy over dummy past residuals
new_scheduler.set_timesteps(lowerCamelCase_ )
# copy over dummy past residual (must be after setting timesteps)
lowerCamelCase__ : List[Any] =dummy_past_residuals[:]
lowerCamelCase__ : List[Any] =scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ).prev_sample
lowerCamelCase__ : Union[str, Any] =new_scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
lowerCamelCase__ : Optional[int] =scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ).prev_sample
lowerCamelCase__ : List[Any] =new_scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def UpperCAmelCase__ ( self :Any , **lowerCamelCase_ :Tuple ):
"""simple docstring"""
lowerCamelCase__ : Tuple =self.scheduler_classes[0]
lowerCamelCase__ : Any =self.get_scheduler_config(**lowerCamelCase_ )
lowerCamelCase__ : Any =scheduler_class(**lowerCamelCase_ )
lowerCamelCase__ : Any =10
lowerCamelCase__ : List[str] =self.dummy_model()
lowerCamelCase__ : Tuple =self.dummy_sample_deter
scheduler.set_timesteps(lowerCamelCase_ )
for i, t in enumerate(scheduler.timesteps ):
lowerCamelCase__ : Union[str, Any] =model(lowerCamelCase_ , lowerCamelCase_ )
lowerCamelCase__ : List[Any] =scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
lowerCamelCase__ : List[Any] =model(lowerCamelCase_ , lowerCamelCase_ )
lowerCamelCase__ : List[str] =scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ).prev_sample
return sample
def UpperCAmelCase__ ( self :Any ):
"""simple docstring"""
lowerCamelCase__ : Optional[Any] =dict(self.forward_default_kwargs )
lowerCamelCase__ : Optional[Any] =kwargs.pop('num_inference_steps' , lowerCamelCase_ )
for scheduler_class in self.scheduler_classes:
lowerCamelCase__ : Dict =self.get_scheduler_config()
lowerCamelCase__ : Tuple =scheduler_class(**lowerCamelCase_ )
lowerCamelCase__ : List[str] =self.dummy_sample
lowerCamelCase__ : Optional[int] =0.1 * sample
if num_inference_steps is not None and hasattr(lowerCamelCase_ , 'set_timesteps' ):
scheduler.set_timesteps(lowerCamelCase_ )
elif num_inference_steps is not None and not hasattr(lowerCamelCase_ , 'set_timesteps' ):
lowerCamelCase__ : List[str] =num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
lowerCamelCase__ : str =[residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
lowerCamelCase__ : List[str] =dummy_past_residuals[:]
lowerCamelCase__ : Union[str, Any] =scheduler.timesteps[5]
lowerCamelCase__ : int =scheduler.timesteps[6]
lowerCamelCase__ : Optional[int] =scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ).prev_sample
lowerCamelCase__ : Tuple =scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
lowerCamelCase__ : Union[str, Any] =scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ).prev_sample
lowerCamelCase__ : Any =scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def UpperCAmelCase__ ( self :List[Any] ):
"""simple docstring"""
for timesteps in [100, 1_000]:
self.check_over_configs(num_train_timesteps=lowerCamelCase_ , time_step=lowerCamelCase_ )
def UpperCAmelCase__ ( self :List[Any] ):
"""simple docstring"""
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=lowerCamelCase_ , time_step=lowerCamelCase_ )
def UpperCAmelCase__ ( self :Dict ):
"""simple docstring"""
lowerCamelCase__ : Union[str, Any] =self.full_loop()
lowerCamelCase__ : Optional[int] =torch.mean(torch.abs(lowerCamelCase_ ) )
assert abs(result_mean.item() - 2_540_529 ) < 10
"""simple docstring"""
def solution( power : int = 1_0_0_0 ) ->int:
    """Return the sum of the decimal digits of 2**power (Project Euler 16)."""
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 1_0, n // 1_0
    return r
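# Worked check (small case, sketch): 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26.
assert solution(15) == 26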
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def _UpperCAmelCase ( self : Tuple):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ = 1
lowercase_ = 3
lowercase_ = (3_2, 3_2)
lowercase_ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0)).to(lowerCAmelCase_)
return image
@property
def _UpperCAmelCase ( self : Union[str, Any]):
"""simple docstring"""
torch.manual_seed(0)
lowercase_ = UNetaDConditionModel(
block_out_channels=(3_2, 3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=7 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=3_2 , attention_head_dim=8 , use_linear_projection=lowerCAmelCase_ , only_cross_attention=(True, True, False) , num_class_embeds=1_0_0 , )
return model
@property
def _UpperCAmelCase ( self : int):
"""simple docstring"""
torch.manual_seed(0)
lowercase_ = AutoencoderKL(
block_out_channels=[3_2, 3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
return model
@property
def _UpperCAmelCase ( self : Tuple):
"""simple docstring"""
torch.manual_seed(0)
lowercase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act="""gelu""" , projection_dim=5_1_2 , )
return CLIPTextModel(lowerCAmelCase_)
def _UpperCAmelCase ( self : Union[str, Any]):
"""simple docstring"""
lowercase_ = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase_ = self.dummy_cond_unet_upscale
lowercase_ = DDPMScheduler()
lowercase_ = DDIMScheduler(prediction_type="""v_prediction""")
lowercase_ = self.dummy_vae
lowercase_ = self.dummy_text_encoder
lowercase_ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""")
lowercase_ = self.dummy_image.cpu().permute(0 , 2 , 3 , 1)[0]
lowercase_ = Image.fromarray(np.uinta(lowerCAmelCase_)).convert("""RGB""").resize((6_4, 6_4))
# make sure here that pndm scheduler skips prk
lowercase_ = StableDiffusionUpscalePipeline(
unet=lowerCAmelCase_ , low_res_scheduler=lowerCAmelCase_ , scheduler=lowerCAmelCase_ , vae=lowerCAmelCase_ , text_encoder=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ , max_noise_level=3_5_0 , )
lowercase_ = sd_pipe.to(lowerCAmelCase_)
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_)
lowercase_ = """A painting of a squirrel eating a burger"""
lowercase_ = torch.Generator(device=lowerCAmelCase_).manual_seed(0)
lowercase_ = sd_pipe(
[prompt] , image=lowerCAmelCase_ , generator=lowerCAmelCase_ , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type="""np""" , )
lowercase_ = output.images
lowercase_ = torch.Generator(device=lowerCAmelCase_).manual_seed(0)
lowercase_ = sd_pipe(
[prompt] , image=lowerCAmelCase_ , generator=lowerCAmelCase_ , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type="""np""" , return_dict=lowerCAmelCase_ , )[0]
lowercase_ = image[0, -3:, -3:, -1]
lowercase_ = image_from_tuple[0, -3:, -3:, -1]
lowercase_ = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
lowercase_ = np.array([0.3_113, 0.3_910, 0.4_272, 0.4_859, 0.5_061, 0.4_652, 0.5_362, 0.5_715, 0.5_661])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
def _UpperCAmelCase ( self : List[str]):
"""simple docstring"""
lowercase_ = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase_ = self.dummy_cond_unet_upscale
lowercase_ = DDPMScheduler()
lowercase_ = DDIMScheduler(prediction_type="""v_prediction""")
lowercase_ = self.dummy_vae
lowercase_ = self.dummy_text_encoder
lowercase_ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""")
lowercase_ = self.dummy_image.cpu().permute(0 , 2 , 3 , 1)[0]
lowercase_ = Image.fromarray(np.uinta(lowerCAmelCase_)).convert("""RGB""").resize((6_4, 6_4))
# make sure here that pndm scheduler skips prk
lowercase_ = StableDiffusionUpscalePipeline(
unet=lowerCAmelCase_ , low_res_scheduler=lowerCAmelCase_ , scheduler=lowerCAmelCase_ , vae=lowerCAmelCase_ , text_encoder=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ , max_noise_level=3_5_0 , )
lowercase_ = sd_pipe.to(lowerCAmelCase_)
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_)
lowercase_ = """A painting of a squirrel eating a burger"""
lowercase_ = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type="""np""" , )
lowercase_ = output.images
assert image.shape[0] == 2
lowercase_ = torch.Generator(device=lowerCAmelCase_).manual_seed(0)
lowercase_ = sd_pipe(
[prompt] , image=lowerCAmelCase_ , generator=lowerCAmelCase_ , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type="""np""" , )
lowercase_ = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""")
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
lowercase_ = self.dummy_cond_unet_upscale
lowercase_ = DDPMScheduler()
lowercase_ = DDIMScheduler(prediction_type="""v_prediction""")
lowercase_ = self.dummy_vae
lowercase_ = self.dummy_text_encoder
lowercase_ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""")
lowercase_ = self.dummy_image.cpu().permute(0 , 2 , 3 , 1)[0]
lowercase_ = Image.fromarray(np.uinta(lowerCAmelCase_)).convert("""RGB""").resize((6_4, 6_4))
# put models in fp16, except vae as it overflows in fp16
lowercase_ = unet.half()
lowercase_ = text_encoder.half()
# make sure here that pndm scheduler skips prk
lowercase_ = StableDiffusionUpscalePipeline(
unet=lowerCAmelCase_ , low_res_scheduler=lowerCAmelCase_ , scheduler=lowerCAmelCase_ , vae=lowerCAmelCase_ , text_encoder=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ , max_noise_level=3_5_0 , )
lowercase_ = sd_pipe.to(lowerCAmelCase_)
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_)
lowercase_ = """A painting of a squirrel eating a burger"""
lowercase_ = torch.manual_seed(0)
lowercase_ = sd_pipe(
[prompt] , image=lowerCAmelCase_ , generator=lowerCAmelCase_ , num_inference_steps=2 , output_type="""np""" , ).images
lowercase_ = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def _UpperCAmelCase ( self : Union[str, Any]):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCAmelCase ( self : Optional[Any]):
"""simple docstring"""
lowercase_ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-upscale/low_res_cat.png""")
lowercase_ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"""
"""/upsampled_cat.npy""")
lowercase_ = """stabilityai/stable-diffusion-x4-upscaler"""
lowercase_ = StableDiffusionUpscalePipeline.from_pretrained(lowerCAmelCase_)
pipe.to(lowerCAmelCase_)
pipe.set_progress_bar_config(disable=lowerCAmelCase_)
pipe.enable_attention_slicing()
lowercase_ = """a cat sitting on a park bench"""
lowercase_ = torch.manual_seed(0)
lowercase_ = pipe(
prompt=lowerCAmelCase_ , image=lowerCAmelCase_ , generator=lowerCAmelCase_ , output_type="""np""" , )
lowercase_ = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image).max() < 1E-3
def _UpperCAmelCase ( self : Optional[Any]):
"""simple docstring"""
lowercase_ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-upscale/low_res_cat.png""")
lowercase_ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"""
"""/upsampled_cat_fp16.npy""")
lowercase_ = """stabilityai/stable-diffusion-x4-upscaler"""
lowercase_ = StableDiffusionUpscalePipeline.from_pretrained(
lowerCAmelCase_ , torch_dtype=torch.floataa , )
pipe.to(lowerCAmelCase_)
pipe.set_progress_bar_config(disable=lowerCAmelCase_)
pipe.enable_attention_slicing()
lowercase_ = """a cat sitting on a park bench"""
lowercase_ = torch.manual_seed(0)
lowercase_ = pipe(
prompt=lowerCAmelCase_ , image=lowerCAmelCase_ , generator=lowerCAmelCase_ , output_type="""np""" , )
lowercase_ = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image).max() < 5E-1
def _UpperCAmelCase ( self : Any):
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowercase_ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-upscale/low_res_cat.png""")
lowercase_ = """stabilityai/stable-diffusion-x4-upscaler"""
lowercase_ = StableDiffusionUpscalePipeline.from_pretrained(
lowerCAmelCase_ , torch_dtype=torch.floataa , )
pipe.to(lowerCAmelCase_)
pipe.set_progress_bar_config(disable=lowerCAmelCase_)
pipe.enable_attention_slicing(1)
pipe.enable_sequential_cpu_offload()
lowercase_ = """a cat sitting on a park bench"""
lowercase_ = torch.manual_seed(0)
lowercase_ = pipe(
prompt=lowerCAmelCase_ , image=lowerCAmelCase_ , generator=lowerCAmelCase_ , num_inference_steps=5 , output_type="""np""" , )
lowercase_ = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 1_0**9
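# Note (sketch): enable_sequential_cpu_offload() keeps each submodule on the GPU
# only while it executes, which is why the fp16 x4 upscaler's peak allocation
# above stays under 2.9 * 10**9 bytes.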
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase : List[Any] = logging.get_logger(__name__)
UpperCAmelCase : Optional[Any] = {
"alibaba-damo/mgp-str-base": "https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json",
}
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
lowercase__ = "mgp-str"
def __init__( self : Optional[Any] , lowerCAmelCase_ : Optional[Any]=[3_2, 1_2_8] , lowerCAmelCase_ : Union[str, Any]=4 , lowerCAmelCase_ : Dict=3 , lowerCAmelCase_ : List[str]=2_7 , lowerCAmelCase_ : str=3_8 , lowerCAmelCase_ : Optional[int]=5_0_2_5_7 , lowerCAmelCase_ : Any=3_0_5_2_2 , lowerCAmelCase_ : str=7_6_8 , lowerCAmelCase_ : Any=1_2 , lowerCAmelCase_ : Optional[Any]=1_2 , lowerCAmelCase_ : Tuple=4.0 , lowerCAmelCase_ : str=True , lowerCAmelCase_ : List[str]=False , lowerCAmelCase_ : List[Any]=1E-5 , lowerCAmelCase_ : Any=0.0 , lowerCAmelCase_ : int=0.0 , lowerCAmelCase_ : str=0.0 , lowerCAmelCase_ : Dict=False , lowerCAmelCase_ : List[str]=0.02 , **lowerCAmelCase_ : str , ):
"""simple docstring"""
super().__init__(**lowerCAmelCase_)
lowercase_ = image_size
lowercase_ = patch_size
lowercase_ = num_channels
lowercase_ = max_token_length
lowercase_ = num_character_labels
lowercase_ = num_bpe_labels
lowercase_ = num_wordpiece_labels
lowercase_ = hidden_size
lowercase_ = num_hidden_layers
lowercase_ = num_attention_heads
lowercase_ = mlp_ratio
lowercase_ = distilled
lowercase_ = layer_norm_eps
lowercase_ = drop_rate
lowercase_ = qkv_bias
lowercase_ = attn_drop_rate
lowercase_ = drop_path_rate
lowercase_ = output_aa_attentions
lowercase_ = initializer_range
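# Usage sketch (the class above is shown with placeholder names; in transformers
# it is MgpstrConfig, model_type "mgp-str"):
#   cfg = MgpstrConfig()
#   cfg.image_size       -> [32, 128]
#   cfg.max_token_length -> 27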
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class GPTNeoXModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return GPTNeoXConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            pad_token_id=self.pad_token_id,
        )

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels

    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXModel(config=config)
        model.to(torch_device)
        model.eval()
        _ = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next tokens and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and attention_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class GPTNeoXModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            GPTNeoXModel,
            GPTNeoXForCausalLM,
            GPTNeoXForQuestionAnswering,
            GPTNeoXForSequenceClassification,
            GPTNeoXForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (GPTNeoXForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": GPTNeoXModel,
            "question-answering": GPTNeoXForQuestionAnswering,
            "text-classification": GPTNeoXForSequenceClassification,
            "text-generation": GPTNeoXForCausalLM,
            "token-classification": GPTNeoXForTokenClassification,
            "zero-shot": GPTNeoXForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = GPTNeoXModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXConfig, hidden_size=64, num_attention_heads=8)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    def test_model_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_model_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @unittest.skip(reason="Feed forward chunking is not implemented")
    def test_feed_forward_chunking(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = GPTNeoXModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = GPTNeoXModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))


@require_torch
class GPTNeoXLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_gptneox(self):
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/pythia-410m-deduped")
        for checkpointing in [True, False]:
            model = GPTNeoXForCausalLM.from_pretrained("EleutherAI/pythia-410m-deduped")

            if checkpointing:
                model.gradient_checkpointing_enable()
            else:
                model.gradient_checkpointing_disable()
            model.to(torch_device)

            inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)
            # The hub repo. is updated on 2023-04-04, resulting in poor outputs.
            # See: https://github.com/huggingface/transformers/pull/24193
            expected_output = "My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure"

            output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=20)
            output_str = tokenizer.batch_decode(output_ids)[0]

            self.assertEqual(output_str, expected_output)
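

# Hedged sketch (not part of the original test module): driving the tester
# helpers above directly instead of through unittest discovery. Requires
# PyTorch to be installed; the probe class name is illustrative.
if __name__ == "__main__":
    if is_torch_available():

        class _Probe(unittest.TestCase):
            def runTest(self):
                tester = GPTNeoXModelTester(self)
                config, input_ids, input_mask, token_labels = tester.prepare_config_and_inputs()
                tester.create_and_check_model(config, input_ids, input_mask)

        unittest.TextTestRunner(verbosity=2).run(_Probe())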
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlnet"] = ["XLNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlnet_fast"] = ["XLNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlnet"] = [
"XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLNetForMultipleChoice",
"XLNetForQuestionAnswering",
"XLNetForQuestionAnsweringSimple",
"XLNetForSequenceClassification",
"XLNetForTokenClassification",
"XLNetLMHeadModel",
"XLNetModel",
"XLNetPreTrainedModel",
"load_tf_weights_in_xlnet",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlnet"] = [
"TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLNetForMultipleChoice",
"TFXLNetForQuestionAnsweringSimple",
"TFXLNetForSequenceClassification",
"TFXLNetForTokenClassification",
"TFXLNetLMHeadModel",
"TFXLNetMainLayer",
"TFXLNetModel",
"TFXLNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
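

# Hedged usage note (illustrative; relies on the standard transformers lazy
# import machinery wired up above): importing the package is cheap, and the
# heavy torch/TF submodules only load on first attribute access, e.g.
#
#     from transformers import XLNetConfig, XLNetModel  # resolved lazily
#     model = XLNetModel(XLNetConfig(d_model=64, n_layer=2, n_head=2))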
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)


class IFSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig

    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModelWithProjection(config.vision_config)
        self.p_head = nn.Linear(config.vision_config.projection_dim, 1)
        self.w_head = nn.Linear(config.vision_config.projection_dim, 1)

    @torch.no_grad()
    def forward(self, clip_input, images, p_threshold=0.5, w_threshold=0.5):
        image_embeds = self.vision_model(clip_input)[0]

        nsfw_detected = self.p_head(image_embeds)
        nsfw_detected = nsfw_detected.flatten()
        nsfw_detected = nsfw_detected > p_threshold
        nsfw_detected = nsfw_detected.tolist()

        if any(nsfw_detected):
            logger.warning(
                "Potential NSFW content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed."
            )

        for idx, nsfw_detected_ in enumerate(nsfw_detected):
            if nsfw_detected_:
                images[idx] = np.zeros(images[idx].shape)

        watermark_detected = self.w_head(image_embeds)
        watermark_detected = watermark_detected.flatten()
        watermark_detected = watermark_detected > w_threshold
        watermark_detected = watermark_detected.tolist()

        if any(watermark_detected):
            logger.warning(
                "Potential watermarked content was detected in one or more images. A black image will be returned"
                " instead. Try again with a different prompt and/or seed."
            )

        for idx, watermark_detected_ in enumerate(watermark_detected):
            if watermark_detected_:
                images[idx] = np.zeros(images[idx].shape)

        return images, nsfw_detected, watermark_detected
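

# Hedged usage sketch (variable names and shapes are illustrative assumptions;
# the class name `IFSafetyChecker` matches the rewrite above):
#
#     checker = IFSafetyChecker(CLIPConfig())
#     images, nsfw_flags, watermark_flags = checker(
#         clip_input,   # CLIP-preprocessed pixel values, shape (batch, 3, H, W)
#         images,       # the decoded images, blacked out in place when flagged
#         p_threshold=0.5,
#         w_threshold=0.5,
#     )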
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/xmod-base': 'https://huggingface.co/facebook/xmod-base/resolve/main/config.json',
'facebook/xmod-large-prenorm': 'https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json',
'facebook/xmod-base-13-125k': 'https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json',
'facebook/xmod-base-30-125k': 'https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json',
'facebook/xmod-base-30-195k': 'https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json',
'facebook/xmod-base-60-125k': 'https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json',
'facebook/xmod-base-60-265k': 'https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json',
'facebook/xmod-base-75-125k': 'https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json',
'facebook/xmod-base-75-269k': 'https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json',
}
class XmodConfig(PretrainedConfig):
    model_type = "xmod"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=("en_XX",),
        default_language=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language


class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
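

# Hedged demo (run manually; `OnnxConfig.from_model_config` is the standard
# transformers.onnx constructor and is an assumption here):
if __name__ == "__main__":
    cfg = XmodConfig()
    print(cfg.model_type)  # "xmod"
    print(cfg.languages)   # ["en_XX"]
    print(XmodOnnxConfig.from_model_config(cfg).inputs.keys())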
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wavlm"] = [
"""WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""WavLMForAudioFrameClassification""",
"""WavLMForCTC""",
"""WavLMForSequenceClassification""",
"""WavLMForXVector""",
"""WavLMModel""",
"""WavLMPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""SenseTime/deformable-detr""": """https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json""",
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class DeformableDetrConfig(PretrainedConfig):
    model_type = "deformable_detr"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        max_position_embeddings=1024,
        encoder_layers=6,
        encoder_ffn_dim=1024,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        num_feature_levels=4,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=False,
        two_stage_num_proposals=300,
        with_box_refine=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        disable_custom_kernels=False,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
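

# Hedged demo of the config and its to_dict() serialization above (note the
# class enforces with_box_refine=True whenever two_stage=True):
if __name__ == "__main__":
    cfg = DeformableDetrConfig(two_stage=True, with_box_refine=True)
    serialized = cfg.to_dict()
    print(serialized["model_type"], cfg.hidden_size, cfg.num_attention_heads)
    # -> deformable_detr 256 8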
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings('''ignore''', category=UserWarning, module='''torch.optim.lr_scheduler''')
class AcceleratedScheduler:
    """
    A wrapper around a learning rate scheduler that will only step when the optimizer(s) have actually been stepped.
    """

    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return

        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return

        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    # Passthrough methods to the wrapped scheduler
    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
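

# Hedged usage sketch (simplified; in practice `Accelerator.prepare` builds
# this wrapper for you, and `model` here is an assumed torch module):
#
#     import torch
#     optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
#     lr_sched = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10)
#     scheduler = AcceleratedScheduler(lr_sched, optimizer, step_with_optimizer=True)
#     ...
#     scheduler.step()  # only advances once the optimizer has really stepped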
import json
import os
import re
import sys
import urllib.request
import requests
from bs4 import BeautifulSoup

headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
    " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
}


def download_images_from_google_query(query: str = "dhaka", max_images: int = 5) -> int:
    """Search Google Images for `query` and save up to `max_images` results to disk."""
    max_images = min(max_images, 50)  # Prevent abuse!
    params = {
        "q": query,
        "tbm": "isch",
        "hl": "en",
        "ijn": "0",
    }

    html = requests.get("https://www.google.com/search", params=params, headers=headers)
    soup = BeautifulSoup(html.text, "html.parser")
    matched_images_data = "".join(
        re.findall(r"AF_initDataCallback\(([^<]+)\);", str(soup.select("script")))
    )

    matched_images_data_fix = json.dumps(matched_images_data)
    matched_images_data_json = json.loads(matched_images_data_fix)

    matched_google_image_data = re.findall(
        r"\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",",
        matched_images_data_json,
    )
    if not matched_google_image_data:
        return 0

    removed_matched_google_images_thumbnails = re.sub(
        r"\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]",
        "",
        str(matched_google_image_data),
    )

    matched_google_full_resolution_images = re.findall(
        r"(?:'|,),\[\"(https:|http.*?)\",\d+,\d+\]",
        removed_matched_google_images_thumbnails,
    )

    for index, fixed_full_res_image in enumerate(matched_google_full_resolution_images):
        if index >= max_images:
            return index
        original_size_img_not_fixed = bytes(fixed_full_res_image, "ascii").decode("unicode-escape")
        original_size_img = bytes(original_size_img_not_fixed, "ascii").decode("unicode-escape")
        opener = urllib.request.build_opener()
        opener.addheaders = [
            (
                "User-Agent",
                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
                " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582",
            )
        ]
        urllib.request.install_opener(opener)
        path_name = f"query_{query.replace(' ', '_')}"
        if not os.path.exists(path_name):
            os.makedirs(path_name)
        urllib.request.urlretrieve(  # noqa: S310
            original_size_img, f"{path_name}/original_size_img_{index}.jpg"
        )
    return index


if __name__ == "__main__":
    try:
        image_count = download_images_from_google_query(sys.argv[1])
        print(f"{image_count} images were downloaded to disk.")
    except IndexError:
        print("Please provide a search term.")
        raise
'''simple docstring'''
from __future__ import annotations
def prime_sieve(limit: int) -> list[int]:
    """Return all primes below `limit` using an odd-only sieve of Eratosthenes."""
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)
    return primes


def solution(ceiling: int = 1_000_000) -> int:
    """Project Euler 50: the prime below `ceiling` expressible as the longest
    sum of consecutive primes."""
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0

    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol
    return largest
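

# Hedged sanity check: below 100, the longest run of consecutive primes whose
# sum is itself prime is 2 + 3 + 5 + 7 + 11 + 13 = 41, so:
#
#     >>> solution(100)
#     41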
if __name__ == "__main__":
print(f"""{solution() = }""")
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mra"] = [
"MRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"MraForMaskedLM",
"MraForMultipleChoice",
"MraForQuestionAnswering",
"MraForSequenceClassification",
"MraForTokenClassification",
"MraLayer",
"MraModel",
"MraPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json",
}


class ConvNextV2Config(BackboneConfigMixin, PretrainedConfig):
    model_type = "convnextv2"

    def __init__(
        self,
        num_channels=3,
        patch_size=4,
        num_stages=4,
        hidden_sizes=None,
        depths=None,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        drop_path_rate=0.0,
        image_size=224,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
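

# Hedged demo (the `out_features` accessor comes from BackboneConfigMixin):
if __name__ == "__main__":
    cfg = ConvNextV2Config(out_features=["stage2", "stage4"])
    print(cfg.stage_names)   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
    print(cfg.out_features)  # ['stage2', 'stage4']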
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class TFRegNetConvLayer(tf.keras.layers.Layer):
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 3 , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = "relu" , **SCREAMING_SNAKE_CASE_ , ):
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE_)
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
lowercase__ : str = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2)
lowercase__ : Optional[int] = tf.keras.layers.ConvaD(
filters=SCREAMING_SNAKE_CASE_ , kernel_size=SCREAMING_SNAKE_CASE_ , strides=SCREAMING_SNAKE_CASE_ , padding="""VALID""" , groups=SCREAMING_SNAKE_CASE_ , use_bias=SCREAMING_SNAKE_CASE_ , name="""convolution""" , )
lowercase__ : str = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="""normalization""")
lowercase__ : Any = ACTaFN[activation] if activation is not None else tf.identity
def lowercase__ ( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : Tuple = self.convolution(self.padding(SCREAMING_SNAKE_CASE_))
lowercase__ : List[Any] = self.normalization(SCREAMING_SNAKE_CASE_)
lowercase__ : str = self.activation(SCREAMING_SNAKE_CASE_)
return hidden_state
class TFRegNetEmbeddings(tf.keras.layers.Layer):
def __init__( self , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE_)
lowercase__ : List[str] = config.num_channels
lowercase__ : int = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name="""embedder""" , )
def lowercase__ ( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : List[str] = shape_list(SCREAMING_SNAKE_CASE_)[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
"""Make sure that the channel dimension of the pixel values match with the one set in the configuration.""")
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
lowercase__ : Optional[int] = tf.transpose(SCREAMING_SNAKE_CASE_ , perm=(0, 2, 3, 1))
lowercase__ : Optional[int] = self.embedder(SCREAMING_SNAKE_CASE_)
return hidden_state
class TFRegNetShortCut(tf.keras.layers.Layer):
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 2 , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE_)
lowercase__ : Any = tf.keras.layers.ConvaD(
filters=SCREAMING_SNAKE_CASE_ , kernel_size=1 , strides=SCREAMING_SNAKE_CASE_ , use_bias=SCREAMING_SNAKE_CASE_ , name="""convolution""")
lowercase__ : str = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="""normalization""")
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = False):
'''simple docstring'''
return self.normalization(self.convolution(SCREAMING_SNAKE_CASE_) , training=SCREAMING_SNAKE_CASE_)
class TFRegNetSELayer(tf.keras.layers.Layer):
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE_)
lowercase__ : str = tf.keras.layers.GlobalAveragePoolingaD(keepdims=SCREAMING_SNAKE_CASE_ , name="""pooler""")
lowercase__ : Tuple = [
tf.keras.layers.ConvaD(filters=SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation="""relu""" , name="""attention.0"""),
tf.keras.layers.ConvaD(filters=SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation="""sigmoid""" , name="""attention.2"""),
]
def lowercase__ ( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : Optional[Any] = self.pooler(SCREAMING_SNAKE_CASE_)
for layer_module in self.attention:
lowercase__ : Optional[Any] = layer_module(SCREAMING_SNAKE_CASE_)
lowercase__ : Any = hidden_state * pooled
return hidden_state
class TFRegNetXLayer(tf.keras.layers.Layer):
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 1 , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE_)
lowercase__ : int = in_channels != out_channels or stride != 1
lowercase__ : Any = max(1 , out_channels // config.groups_width)
lowercase__ : List[Any] = (
TFRegNetShortCut(SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , name="""shortcut""")
if should_apply_shortcut
else tf.keras.layers.Activation("""linear""" , name="""shortcut""")
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
lowercase__ : Optional[int] = [
TFRegNetConvLayer(SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation=config.hidden_act , name="""layer.0"""),
TFRegNetConvLayer(
SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , groups=SCREAMING_SNAKE_CASE_ , activation=config.hidden_act , name="""layer.1"""),
TFRegNetConvLayer(SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation=SCREAMING_SNAKE_CASE_ , name="""layer.2"""),
]
lowercase__ : Tuple = ACTaFN[config.hidden_act]
def lowercase__ ( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : Tuple = hidden_state
for layer_module in self.layers:
lowercase__ : Any = layer_module(SCREAMING_SNAKE_CASE_)
lowercase__ : Union[str, Any] = self.shortcut(SCREAMING_SNAKE_CASE_)
hidden_state += residual
lowercase__ : Any = self.activation(SCREAMING_SNAKE_CASE_)
return hidden_state
class TFRegNetYLayer(tf.keras.layers.Layer):
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 1 , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE_)
lowercase__ : Any = in_channels != out_channels or stride != 1
lowercase__ : str = max(1 , out_channels // config.groups_width)
lowercase__ : int = (
TFRegNetShortCut(SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , name="""shortcut""")
if should_apply_shortcut
else tf.keras.layers.Activation("""linear""" , name="""shortcut""")
)
lowercase__ : Any = [
TFRegNetConvLayer(SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation=config.hidden_act , name="""layer.0"""),
TFRegNetConvLayer(
SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , groups=SCREAMING_SNAKE_CASE_ , activation=config.hidden_act , name="""layer.1"""),
TFRegNetSELayer(SCREAMING_SNAKE_CASE_ , reduced_channels=int(round(in_channels / 4)) , name="""layer.2"""),
TFRegNetConvLayer(SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation=SCREAMING_SNAKE_CASE_ , name="""layer.3"""),
]
lowercase__ : Union[str, Any] = ACTaFN[config.hidden_act]
def lowercase__ ( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : Union[str, Any] = hidden_state
for layer_module in self.layers:
lowercase__ : Optional[Any] = layer_module(SCREAMING_SNAKE_CASE_)
lowercase__ : Optional[int] = self.shortcut(SCREAMING_SNAKE_CASE_)
hidden_state += residual
lowercase__ : Optional[int] = self.activation(SCREAMING_SNAKE_CASE_)
return hidden_state
class TFRegNetStage(tf.keras.layers.Layer):
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 2 , SCREAMING_SNAKE_CASE_ = 2 , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE_)
lowercase__ : Any = TFRegNetXLayer if config.layer_type == """x""" else TFRegNetYLayer
lowercase__ : Dict = [
# downsampling is done in the first layer with stride of 2
layer(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , name="""layers.0"""),
*[layer(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , name=f'layers.{i+1}') for i in range(depth - 1)],
]
def lowercase__ ( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
for layer_module in self.layers:
lowercase__ : Tuple = layer_module(SCREAMING_SNAKE_CASE_)
return hidden_state
class TFRegNetEncoder(tf.keras.layers.Layer):
def __init__( self , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE_)
lowercase__ : Tuple = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
SCREAMING_SNAKE_CASE_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name="""stages.0""" , ))
lowercase__ : List[Any] = zip(config.hidden_sizes , config.hidden_sizes[1:])
for i, ((in_channels, out_channels), depth) in enumerate(zip(SCREAMING_SNAKE_CASE_ , config.depths[1:])):
self.stages.append(TFRegNetStage(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , depth=SCREAMING_SNAKE_CASE_ , name=f'stages.{i+1}'))
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = True):
'''simple docstring'''
lowercase__ : Any = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
lowercase__ : List[Any] = hidden_states + (hidden_state,)
lowercase__ : str = stage_module(SCREAMING_SNAKE_CASE_)
if output_hidden_states:
lowercase__ : Optional[int] = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None)
return TFBaseModelOutputWithNoAttention(last_hidden_state=SCREAMING_SNAKE_CASE_ , hidden_states=SCREAMING_SNAKE_CASE_)
@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    config_class = RegNetConfig
def __init__( self , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE_)
lowercase__ : Dict = config
lowercase__ : List[Any] = TFRegNetEmbeddings(SCREAMING_SNAKE_CASE_ , name="""embedder""")
lowercase__ : Any = TFRegNetEncoder(SCREAMING_SNAKE_CASE_ , name="""encoder""")
lowercase__ : Optional[int] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=SCREAMING_SNAKE_CASE_ , name="""pooler""")
@unpack_inputs
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = False , ):
'''simple docstring'''
lowercase__ : str = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase__ : str = return_dict if return_dict is not None else self.config.use_return_dict
lowercase__ : Any = self.embedder(SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_)
lowercase__ : Any = self.encoder(
SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_)
lowercase__ : Optional[int] = encoder_outputs[0]
lowercase__ : Tuple = self.pooler(SCREAMING_SNAKE_CASE_)
# Change to NCHW output format have uniformity in the modules
lowercase__ : Dict = tf.transpose(SCREAMING_SNAKE_CASE_ , perm=(0, 3, 1, 2))
lowercase__ : Optional[Any] = tf.transpose(SCREAMING_SNAKE_CASE_ , perm=(0, 3, 1, 2))
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
lowercase__ : Optional[int] = tuple([tf.transpose(SCREAMING_SNAKE_CASE_ , perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=SCREAMING_SNAKE_CASE_ , pooler_output=SCREAMING_SNAKE_CASE_ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class TFRegNetPreTrainedModel(TFPreTrainedModel):
    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
@property
def lowercase__ ( self):
'''simple docstring'''
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_24, 2_24) , dtype=tf.floataa)}
REGNET_START_DOCSTRING = r"""
Parameters:
This model is a Tensorflow
[tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
behavior.
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""
REGNET_INPUTS_DOCSTRING = r"""
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConveNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
class TFRegNetModel(TFRegNetPreTrainedModel):
def __init__( self , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
super().__init__(SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_)
lowercase__ : Any = TFRegNetMainLayer(SCREAMING_SNAKE_CASE_ , name="""regnet""")
    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFBaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_=False , ):
'''simple docstring'''
lowercase__ : int = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase__ : List[str] = return_dict if return_dict is not None else self.config.use_return_dict
lowercase__ : Dict = self.regnet(
pixel_values=SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
def __init__( self , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
super().__init__(SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_)
lowercase__ : int = config.num_labels
lowercase__ : Optional[int] = TFRegNetMainLayer(SCREAMING_SNAKE_CASE_ , name="""regnet""")
# classification head
lowercase__ : Any = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name="""classifier.1""") if config.num_labels > 0 else tf.identity,
]
    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=TFSequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_=False , ):
'''simple docstring'''
lowercase__ : List[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase__ : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict
lowercase__ : List[str] = self.regnet(
SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_)
lowercase__ : Dict = outputs.pooler_output if return_dict else outputs[1]
lowercase__ : Optional[int] = self.classifier[0](SCREAMING_SNAKE_CASE_)
lowercase__ : List[str] = self.classifier[1](SCREAMING_SNAKE_CASE_)
lowercase__ : str = None if labels is None else self.hf_compute_loss(labels=SCREAMING_SNAKE_CASE_ , logits=SCREAMING_SNAKE_CASE_)
if not return_dict:
lowercase__ : Optional[int] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=SCREAMING_SNAKE_CASE_ , logits=SCREAMING_SNAKE_CASE_ , hidden_states=outputs.hidden_states)
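

# Hedged usage sketch (standard transformers TF inference pattern; the exact
# preprocessing call and `image` variable are illustrative assumptions):
#
#     from transformers import AutoImageProcessor
#     processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#     model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#     inputs = processor(images=image, return_tensors="tf")
#     logits = model(**inputs).logits
#     predicted = int(tf.math.argmax(logits, axis=-1)[0])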
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-openqa""": (
"""https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-reader""": (
"""https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-openqa""": (
"""https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-reader""": (
"""https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""google/realm-cc-news-pretrained-embedder""": 512,
"""google/realm-cc-news-pretrained-encoder""": 512,
"""google/realm-cc-news-pretrained-scorer""": 512,
"""google/realm-cc-news-pretrained-openqa""": 512,
"""google/realm-orqa-nq-openqa""": 512,
"""google/realm-orqa-nq-reader""": 512,
"""google/realm-orqa-wq-openqa""": 512,
"""google/realm-orqa-wq-reader""": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"""google/realm-cc-news-pretrained-embedder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-encoder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-scorer""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-reader""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-reader""": {"""do_lower_case""": True},
}
class RealmTokenizerFast(PreTrainedTokenizerFast):
    r"""Construct a "fast" REALM tokenizer (backed by HuggingFace's `tokenizers` library), based on WordPiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def batch_encode_candidates(self, text, **kwargs):
        """Encode a batch of texts or text pairs, padding every candidate to max_length so they can be stacked."""
        kwargs["padding"] = PaddingStrategy.MAX_LENGTH
        batch_text = text
        batch_text_pair = kwargs.pop("text_pair", None)
        return_tensors = kwargs.pop("return_tensors", None)
        output_data = {
            "input_ids": [],
            "attention_mask": [],
            "token_type_ids": [],
        }
        for idx, candidate_text in enumerate(batch_text):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None
            encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs)
            encoded_input_ids = encoded_candidates.get("input_ids")
            encoded_attention_mask = encoded_candidates.get("attention_mask")
            encoded_token_type_ids = encoded_candidates.get("token_type_ids")
            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids)
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask)
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids)
        output_data = {key: item for key, item in output_data.items() if len(item) != 0}
        return BatchEncoding(output_data, tensor_type=return_tensors)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
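
# Minimal usage sketch (editorial addition; shown as a comment because it
# downloads a checkpoint). `batch_encode_candidates` pads every candidate to
# `max_length` so the per-question candidates can be stacked:
#
#   tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
#   batch = tokenizer.batch_encode_candidates(
#       [["Hello world!", "Nice to meet you!"]], max_length=10, return_tensors="pt"
#   )
#   # batch["input_ids"] has shape (num_questions, num_candidates, max_length)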
| 37
|
from __future__ import annotations
def decrypt_caesar_with_chi_squared(ciphertext: str, cipher_alphabet: list[str] | None = None, frequencies_dict: dict[str, float] | None = None, case_sensitive: bool = False) -> tuple[int, float, str]:
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]
# If the argument is None or the user provided an empty dictionary
if not frequencies_dict:
# Frequencies of letters in the english language (how much they show up)
        frequencies = {
            "a": 0.08497,
            "b": 0.01492,
            "c": 0.02202,
            "d": 0.04253,
            "e": 0.11162,
            "f": 0.02228,
            "g": 0.02015,
            "h": 0.06094,
            "i": 0.07546,
            "j": 0.00153,
            "k": 0.01292,
            "l": 0.04025,
            "m": 0.02406,
            "n": 0.06749,
            "o": 0.07507,
            "p": 0.01929,
            "q": 0.00095,
            "r": 0.07587,
            "s": 0.06327,
            "t": 0.09356,
            "u": 0.02758,
            "v": 0.00978,
            "w": 0.02560,
            "x": 0.00150,
            "y": 0.01994,
            "z": 0.00077,
        }
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict
    if not case_sensitive:
        ciphertext = ciphertext.lower()
    # Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}
    # cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""
# decrypt the message with the shift
for letter in ciphertext:
try:
# Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(
                    alphabet_letters
                )
decrypted_with_shift += (
alphabet_letters[new_key].upper()
if case_sensitive and letter.isupper()
else alphabet_letters[new_key]
)
except ValueError:
# Append the character if it isn't in the alphabet
decrypted_with_shift += letter
        chi_squared_statistic = 0.0
# Loop through each letter in the decoded message with the shift
for letter in decrypted_with_shift:
if case_sensitive:
                letter = letter.lower()
if letter in frequencies:
# Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)
                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
else:
if letter.lower() in frequencies:
# Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)
                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )
# Get the most likely cipher by finding the cipher with the smallest chi squared
# statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]

    most_likely_cipher: int = min(
        chi_squared_statistic_values, key=chi_squared_statistic_values_sorting_key
    )
# Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]
# Return the data on the most likely shift
return (
most_likely_cipher,
most_likely_cipher_chi_squared_value,
decoded_most_likely_cipher,
)
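

# Minimal usage sketch (editorial addition): "dro aesmu lbygx pyh" is
# "the quick brown fox" Caesar-shifted by 10, so the chi-squared search
# should recover shift 10.
if __name__ == "__main__":
    shift, chi_squared_value, decoded = decrypt_caesar_with_chi_squared("dro aesmu lbygx pyh")
    print(f"shift={shift}, chi_squared={chi_squared_value:.2f}, message={decoded!r}")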
| 114
| 0
|
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key(name: str) -> str:
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace("img_encoder.pos_embed", "vision_model.embeddings.position_embeddings")
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace("img_encoder.patch_embed.proj", "vision_model.embeddings.patch_embeddings.projection")
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace("img_encoder.patch_embed.norm", "vision_model.embeddings.layernorm")
    if "img_encoder.layers" in name:
        name = name.replace("img_encoder.layers", "vision_model.encoder.stages")
    if "blocks" in name and "res" not in name:
        name = name.replace("blocks", "layers")
    if "attn" in name and "pre_assign" not in name:
        name = name.replace("attn", "self_attn")
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace("proj", "out_proj")
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace("pre_assign_attn.attn.proj", "pre_assign_attn.attn.out_proj")
    if "norm1" in name:
        name = name.replace("norm1", "layer_norm1")
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace("norm2", "layer_norm2")
    if "img_encoder.norm" in name:
        name = name.replace("img_encoder.norm", "vision_model.layernorm")
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace("text_encoder.token_embedding", "text_model.embeddings.token_embedding")
    if "text_encoder.positional_embedding" in name:
        name = name.replace("text_encoder.positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace("text_encoder.transformer.resblocks.", "text_model.encoder.layers.")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if "text_encoder" in name:
        name = name.replace("text_encoder", "text_model")
    if "ln_final" in name:
        name = name.replace("ln_final", "final_layer_norm")
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace("img_projector.linear_hidden.", "visual_projection.")
    if "img_projector.linear_out." in name:
        name = name.replace("img_projector.linear_out.", "visual_projection.3.")
    if "text_projector.linear_hidden" in name:
        name = name.replace("text_projector.linear_hidden", "text_projection")
    if "text_projector.linear_out" in name:
        name = name.replace("text_projector.linear_out", "text_projection.3")
    return name
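

# Editorial sanity checks (not part of the original script) for two
# representative renames performed by rename_key:
assert rename_key("img_encoder.pos_embed") == "vision_model.embeddings.position_embeddings"
assert rename_key("ln_final") == "final_layer_norm"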
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            stage_num, layer_num = int(key_split[2]), int(key_split[4])
            dim = config.vision_config.hidden_size
            if "weight" in key:
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.bias"
                ] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.text_config.hidden_size
            if "weight" in key:
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        else:
            new_name = rename_key(key)
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val
    return orig_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_groupvit_checkpoint(checkpoint_path, pytorch_dump_folder_path, model_name="groupvit-gcc-yfcc", push_to_hub=False):
    """
    Copy/paste/tweak model's weights to the Transformers design.
    """
    config = GroupViTConfig()
    model = GroupViTModel(config).eval()

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys) == 0)

    # verify result
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = prepare_img()
    inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)

    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3523, 6.3629]])
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1873, 8.6230]])
    else:
        raise ValueError(f"Model name {model_name} not supported.")
    assert torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3)

    processor.save_pretrained(pytorch_dump_folder_path)
    model.save_pretrained(pytorch_dump_folder_path)
    print("Successfully saved processor and model to", pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        processor.push_to_hub(model_name, organization="nielsr")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to dump the processor and PyTorch model."
)
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to GroupViT checkpoint")
parser.add_argument(
"--model_name",
default="groupvit-gccy-fcc",
type=str,
help="Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.",
)
    args = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 714
|
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.weight', F'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.bias', F'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'encoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'encoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'encoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.encoder.layers.{i}.norm1.weight', F'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.encoder.layers.{i}.norm1.bias', F'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.weight', F'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', F'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.weight', F'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.cross_attn.out_proj.weight',
F'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.cross_attn.out_proj.bias',
F'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'decoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'decoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'decoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm1.weight', F'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm1.bias', F'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.weight', F'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.bias', F'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.weight', F'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', F'decoder.layers.{i}.final_layer_norm.bias'))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_qcontent_proj.weight', F'decoder.layers.{i}.sa_qcontent_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_kcontent_proj.weight', F'decoder.layers.{i}.sa_kcontent_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_qpos_proj.weight', F'decoder.layers.{i}.sa_qpos_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_kpos_proj.weight', F'decoder.layers.{i}.sa_kpos_proj.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.sa_v_proj.weight', F'decoder.layers.{i}.sa_v_proj.weight'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qcontent_proj.weight', F'decoder.layers.{i}.ca_qcontent_proj.weight')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_kcontent_proj.weight', F'decoder.layers.{i}.ca_kcontent_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_kpos_proj.weight', F'decoder.layers.{i}.ca_kpos_proj.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.ca_v_proj.weight', F'decoder.layers.{i}.ca_v_proj.weight'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight', F'decoder.layers.{i}.ca_qpos_sine_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_qcontent_proj.bias', F'decoder.layers.{i}.sa_qcontent_proj.bias')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_kcontent_proj.bias', F'decoder.layers.{i}.sa_kcontent_proj.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.sa_qpos_proj.bias', F'decoder.layers.{i}.sa_qpos_proj.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.sa_kpos_proj.bias', F'decoder.layers.{i}.sa_kpos_proj.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.sa_v_proj.bias', F'decoder.layers.{i}.sa_v_proj.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qcontent_proj.bias', F'decoder.layers.{i}.ca_qcontent_proj.bias')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_kcontent_proj.bias', F'decoder.layers.{i}.ca_kcontent_proj.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.ca_kpos_proj.bias', F'decoder.layers.{i}.ca_kpos_proj.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.ca_v_proj.bias', F'decoder.layers.{i}.ca_v_proj.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias', F'decoder.layers.{i}.ca_qpos_sine_proj.bias')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
("transformer.decoder.ref_point_head.layers.0.weight", "decoder.ref_point_head.layers.0.weight"),
("transformer.decoder.ref_point_head.layers.0.bias", "decoder.ref_point_head.layers.0.bias"),
("transformer.decoder.ref_point_head.layers.1.weight", "decoder.ref_point_head.layers.1.weight"),
("transformer.decoder.ref_point_head.layers.1.bias", "decoder.ref_point_head.layers.1.bias"),
("transformer.decoder.query_scale.layers.0.weight", "decoder.query_scale.layers.0.weight"),
("transformer.decoder.query_scale.layers.0.bias", "decoder.query_scale.layers.0.bias"),
("transformer.decoder.query_scale.layers.1.weight", "decoder.query_scale.layers.1.weight"),
("transformer.decoder.query_scale.layers.1.bias", "decoder.query_scale.layers.1.bias"),
("transformer.decoder.layers.0.ca_qpos_proj.weight", "decoder.layers.0.ca_qpos_proj.weight"),
("transformer.decoder.layers.0.ca_qpos_proj.bias", "decoder.layers.0.ca_qpos_proj.bias"),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
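

# A hedged illustration (editorial addition, not part of the conversion
# script): PyTorch's fused attention stores q/k/v as one (3*d, d) matrix, so
# slicing rows [:d], [d:2d] and [-d:] recovers the per-projection weights,
# exactly as done above with d = 256.
def _demo_split_qkv(d: int = 4):
    in_proj_weight = torch.randn(3 * d, d)
    q_w = in_proj_weight[:d, :]
    k_w = in_proj_weight[d : d * 2, :]
    v_w = in_proj_weight[-d:, :]
    assert torch.equal(torch.cat([q_w, k_w, v_w]), in_proj_weight)
    return q_w, k_w, v_w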
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    """
    Copy/paste/tweak model's weights to our CONDITIONAL_DETR structure.
    """
    # load default config
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)

    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    logger.info(f"Converting model {model_name}...")

    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["conditional_detr.model" + key[len("conditional_detr") :]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)

    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="conditional_detr_resnet50",
type=str,
help="Name of the CONDITIONAL_DETR model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
    args = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 419
| 0
|
'''simple docstring'''
from __future__ import annotations
def bucket_sort(my_list: list) -> list:
    if len(my_list) == 0:
        return []
    min_value, max_value = min(my_list), max(my_list)
    bucket_count = int(max_value - min_value) + 1
    buckets: list[list] = [[] for _ in range(bucket_count)]
    for i in my_list:
        buckets[int(i - min_value)].append(i)
    return [v for bucket in buckets for v in sorted(bucket)]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -1_0, 1_5, 2, -2]) == [-1_0, -2, 0, 1, 2, 1_5]
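    # Editorial note: bucket sort runs in roughly O(n + k) time on
    # near-uniform data (k = value range) but degrades when many items share
    # one bucket; two extra hedged checks:
    assert bucket_sort([]) == []
    assert bucket_sort([0.4, 0.1, 0.3]) == [0.1, 0.3, 0.4]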
| 603
|
def solution() -> int:
    """Return the number of Sundays that fell on the first of the month during
    the twentieth century (1 Jan 1901 to 31 Dec 2000)."""
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6
    month = 1
    year = 1901
    sundays = 0
    while year < 2001:
        day += 7
        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]
        if month > 12:
            year += 1
            month = 1
        if year < 2001 and day == 1:
            sundays += 1
    return sundays
if __name__ == "__main__":
print(solution())
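    # Editorial note: for Project Euler problem 19 this prints 171.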
| 462
| 0
|
"""simple docstring"""
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class BeitModelTester:
    def __init__(self, parent, vocab_size=100, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=4, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None, out_indices=[0, 1, 2, 3]):
        self.parent = parent
        self.vocab_size = 100
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.out_indices = out_indices
        self.num_labels = num_labels
        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return BeitConfig(
            vocab_size=self.vocab_size, image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = BeitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, pixel_values, labels, pixel_labels):
        model = BeitForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.type_sequence_label_size
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = BeitForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BeitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": BeitModel,
            "image-classification": BeitForImageClassification,
            "image-segmentation": BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="BEiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    def test_training(self):
        if not self.model_tester.is_training:
            return
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]:
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return
        config.use_cache = False
        config.return_dict = True
        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if (
                model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]
                or not model_class.supports_gradient_checkpointing
            ):
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                # we skip lambda parameters as these require special initial values
                # determined by config.layer_scale_init_value
                if "lambda" in name:
                    continue
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @slow
    def test_model_from_pretrained(self):
        for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BeitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class BeitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_masked_image_modeling_head(self):
        model = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        # prepare bool_masked_pos
        bool_masked_pos = torch.ones((1, 196), dtype=torch.bool).to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 196, 8192))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))

    @slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.2385, -1.0987, -1.0108]).to(torch_device)
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k").to(
            torch_device
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21841))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([1.6881, -0.2787, 0.5901]).to(torch_device)
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 150, 160, 160))
        self.assertEqual(logits.shape, expected_shape)

        is_pillow_less_than_9 = version.parse(PIL.__version__) < version.parse("9.0.0")
        if is_pillow_less_than_9:
            expected_slice = torch.tensor(
                [
                    [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
                    [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
                    [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
                ],
                device=torch_device,
            )
        else:
            expected_slice = torch.tensor(
                [
                    [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
                    [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
                    [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
                ],
                device=torch_device,
            )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((160, 160))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 14
|
"""simple docstring"""
def is_palindrome(head):
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True
def is_palindrome_stack(head):
    if not head or not head.next:
        return True
    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next
    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)
    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True
def is_palindrome_dict(head):
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
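

# The three checks above assume a simple singly-linked node type that the
# original file does not define; a minimal sketch for illustration
# (editorial addition):
class ListNode:
    def __init__(self, val):
        self.val = val
        self.next = None


def build_list(values):
    """Build a singly linked list from an iterable and return its head."""
    head = tail = None
    for value in values:
        node = ListNode(value)
        if head is None:
            head = tail = node
        else:
            tail.next = node
            tail = node
    return head


if __name__ == "__main__":
    assert is_palindrome(build_list([1, 2, 2, 1]))
    assert is_palindrome_stack(build_list([1, 2, 1]))
    assert not is_palindrome_dict(build_list([1, 2, 3]))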
| 14
| 1
|
"""simple docstring"""
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(image_width: int = 800, image_height: int = 600, figure_center_x: float = -0.6, figure_center_y: float = 0, figure_width: float = 3.2, max_step: int = 50, use_distance_color_coding: bool = True) -> Image.Image:
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x, figure_y, max_step)
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img
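

# Editorial note: get_distance implements the classic escape-time iteration
# z -> z**2 + c, with z tracked as real part `a` and imaginary part `b`; the
# value returned in [0, 1] is the normalized escape step used for coloring.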
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 247
|
'''simple docstring'''
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)


class SpeechT5FeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_values", "attention_mask"]
    def __init__(self, feature_size=1, sampling_rate=16000, padding_value=0.0, do_normalize=False, num_mel_bins=80, hop_length=16, win_length=64, win_function="hann_window", frame_signal_scale=1.0, fmin=80, fmax=7600, mel_floor=1e-10, reduction_factor=2, return_attention_mask=True, **kwargs):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.do_normalize = do_normalize
        self.return_attention_mask = return_attention_mask
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.frame_signal_scale = frame_signal_scale
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.reduction_factor = reduction_factor

        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1

        self.window = window_function(window_length=self.sample_size, name=self.win_function, periodic=True)
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.n_freqs,
            num_mel_filters=self.num_mel_bins,
            min_frequency=self.fmin,
            max_frequency=self.fmax,
            sampling_rate=self.sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

        if frame_signal_scale != 1.0:
            warnings.warn(
                "The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers",
                FutureWarning,
            )
        if reduction_factor != 2.0:
            warnings.warn(
                "The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers",
                FutureWarning,
            )
    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(input_values, attention_mask, padding_value=0.0):
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []
            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]
        return normed_input_values
def UpperCAmelCase_ ( self , _lowerCamelCase , ):
lowerCAmelCase_ = spectrogram(
_lowerCamelCase , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel='''log10''' , )
return log_mel_spec.T
def __call__( self , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = False , _lowerCamelCase = None , _lowerCamelCase = False , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , **_lowerCamelCase , ):
if audio is None and audio_target is None:
raise ValueError('''You must provide either `audio` or `audio_target` values.''' )
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
F''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'''
F''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'''It is strongly recommended to pass the ``sampling_rate`` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
if audio is not None:
lowerCAmelCase_ = self._process_audio(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase , )
else:
lowerCAmelCase_ = None
if audio_target is not None:
lowerCAmelCase_ = self._process_audio(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase , )
if inputs is None:
return inputs_target
else:
lowerCAmelCase_ = inputs_target['''input_values''']
lowerCAmelCase_ = inputs_target.get('''attention_mask''' )
if decoder_attention_mask is not None:
lowerCAmelCase_ = decoder_attention_mask
return inputs
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase = False , _lowerCamelCase = False , _lowerCamelCase = None , _lowerCamelCase = False , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , **_lowerCamelCase , ):
lowerCAmelCase_ = isinstance(_lowerCamelCase , np.ndarray ) and len(speech.shape ) > 1
if is_batched_numpy and len(speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
lowerCAmelCase_ = is_batched_numpy or (
isinstance(_lowerCamelCase , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
lowerCAmelCase_ = [np.asarray(_lowerCamelCase , dtype=np.floataa ) for speech in speech]
elif not is_batched and not isinstance(_lowerCamelCase , np.ndarray ):
lowerCAmelCase_ = np.asarray(_lowerCamelCase , dtype=np.floataa )
elif isinstance(_lowerCamelCase , np.ndarray ) and speech.dtype is np.dtype(np.floataa ):
lowerCAmelCase_ = speech.astype(np.floataa )
# always return batch
if not is_batched:
lowerCAmelCase_ = [speech]
# needed to make pad() work on spectrogram inputs
lowerCAmelCase_ = self.feature_size
# convert into correct format for padding
if is_target:
lowerCAmelCase_ = [self._extract_mel_features(_lowerCamelCase ) for waveform in speech]
lowerCAmelCase_ = BatchFeature({'''input_values''': features} )
lowerCAmelCase_ = self.num_mel_bins
else:
lowerCAmelCase_ = BatchFeature({'''input_values''': speech} )
lowerCAmelCase_ = self.pad(
_lowerCamelCase , padding=_lowerCamelCase , max_length=_lowerCamelCase , truncation=_lowerCamelCase , pad_to_multiple_of=_lowerCamelCase , return_attention_mask=_lowerCamelCase , **_lowerCamelCase , )
lowerCAmelCase_ = feature_size_hack
# convert input values to correct format
lowerCAmelCase_ = padded_inputs['''input_values''']
if not isinstance(input_values[0] , np.ndarray ):
lowerCAmelCase_ = [np.asarray(_lowerCamelCase , dtype=np.floataa ) for array in input_values]
elif (
not isinstance(_lowerCamelCase , np.ndarray )
and isinstance(input_values[0] , np.ndarray )
and input_values[0].dtype is np.dtype(np.floataa )
):
lowerCAmelCase_ = [array.astype(np.floataa ) for array in input_values]
elif isinstance(_lowerCamelCase , np.ndarray ) and input_values.dtype is np.dtype(np.floataa ):
lowerCAmelCase_ = input_values.astype(np.floataa )
# convert attention_mask to correct format
lowerCAmelCase_ = padded_inputs.get('''attention_mask''' )
if attention_mask is not None:
lowerCAmelCase_ = [np.asarray(_lowerCamelCase , dtype=np.intaa ) for array in attention_mask]
# zero-mean and unit-variance normalization
if not is_target and self.do_normalize:
lowerCAmelCase_ = (
attention_mask
if self._get_padding_strategies(_lowerCamelCase , max_length=_lowerCamelCase ) is not PaddingStrategy.DO_NOT_PAD
else None
)
lowerCAmelCase_ = self.zero_mean_unit_var_norm(
padded_inputs['''input_values'''] , attention_mask=_lowerCamelCase , padding_value=self.padding_value )
if return_tensors is not None:
lowerCAmelCase_ = padded_inputs.convert_to_tensors(_lowerCamelCase )
return padded_inputs
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = super().to_dict()
# Don't serialize these as they are derived from the other properties.
lowerCAmelCase_ = ['''window''', '''mel_filters''', '''sample_size''', '''sample_stride''', '''n_fft''', '''n_freqs''']
for name in names:
if name in output:
del output[name]
return output
| 274
| 0
|
"""simple docstring"""
from __future__ import annotations
def snake_case__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) ->Dict:
UpperCAmelCase__ = len(__snake_case )
    # If row is equal to the size of the board it means there is a queen in each
    # row of the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append([""". """ * i + """Q """ + """. """ * (n - 1 - i) for i in possible_board] )
return
    # We iterate over each column of the row to find all valid placements
for col in range(__snake_case ):
        # We apply what we learned previously. First we check that the current board
        # (possible_board) does not already contain this column value, because if it
        # does there is a vertical collision. Then we apply the two formulas we
        # learned before:
        #
        # 45º: y - x = b or 45º: row - col = b
        # 135º: y + x = b or 135º: row + col = b.
        #
        # Then we verify that the results of these two formulas do not already exist
        # in their respective sets (diagonal_right_collisions, diagonal_left_collisions).
        #
        # If any of these checks is true there is a collision, so we continue to the
        # next column in the for loop.
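        # Worked example: a queen already placed at (row=1, col=3) occupies the
        # 45º diagonal 1 - 3 = -2 and the 135º diagonal 1 + 3 = 4, so any later
        # (row, col) with row - col == -2 or row + col == 4 would collide with it.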
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
        # If there is no collision we call the dfs function again with the updated inputs
depth_first_search(
[*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , __snake_case , __snake_case , )
def snake_case__ ( _SCREAMING_SNAKE_CASE ) ->str:
UpperCAmelCase__ = []
depth_first_search([] , [] , [] , __snake_case , __snake_case )
# Print all the boards
for board in boards:
for column in board:
print(__snake_case )
print("""""" )
print(len(__snake_case ) , """solutions were found.""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
| 705
|
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
a : Optional[Any] = logging.get_logger(__name__)
a : Union[str, Any] = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
a : List[str] = {
'''vocab_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
},
}
a : Union[str, Any] = {
'''allenai/longformer-base-4096''': 4096,
'''allenai/longformer-large-4096''': 4096,
'''allenai/longformer-large-4096-finetuned-triviaqa''': 4096,
'''allenai/longformer-base-4096-extra.pos.embd.only''': 4096,
'''allenai/longformer-large-4096-extra.pos.embd.only''': 4096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def snake_case__ ( ) ->Union[str, Any]:
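    # GPT-2 byte-level trick: every possible byte gets a printable unicode
    # character; bytes outside the printable ranges below are shifted up by 256
    # (chr(2**8 + n)) so the vocabulary never contains control characters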
UpperCAmelCase__ = (
list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) )
)
UpperCAmelCase__ = bs[:]
UpperCAmelCase__ = 0
for b in range(2**8 ):
if b not in bs:
bs.append(_SCREAMING_SNAKE_CASE )
cs.append(2**8 + n )
n += 1
UpperCAmelCase__ = [chr(_SCREAMING_SNAKE_CASE ) for n in cs]
return dict(zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
def snake_case__ ( _SCREAMING_SNAKE_CASE ) ->Dict:
UpperCAmelCase__ = set()
UpperCAmelCase__ = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
UpperCAmelCase__ = char
return pairs
class _UpperCamelCase ( __UpperCamelCase ):
'''simple docstring'''
__lowercase : str = VOCAB_FILES_NAMES
__lowercase : Tuple = PRETRAINED_VOCAB_FILES_MAP
__lowercase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowercase : int = ['input_ids', 'attention_mask']
def __init__( self , __lowercase , __lowercase , __lowercase="replace" , __lowercase="<s>" , __lowercase="</s>" , __lowercase="</s>" , __lowercase="<s>" , __lowercase="<unk>" , __lowercase="<pad>" , __lowercase="<mask>" , __lowercase=False , **__lowercase , ):
UpperCAmelCase__ = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else bos_token
UpperCAmelCase__ = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else eos_token
UpperCAmelCase__ = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else sep_token
UpperCAmelCase__ = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else cls_token
UpperCAmelCase__ = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else unk_token
UpperCAmelCase__ = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else pad_token
        # Mask token behaves like a normal word, i.e. it includes the space before it
UpperCAmelCase__ = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else mask_token
super().__init__(
errors=__lowercase , bos_token=__lowercase , eos_token=__lowercase , unk_token=__lowercase , sep_token=__lowercase , cls_token=__lowercase , pad_token=__lowercase , mask_token=__lowercase , add_prefix_space=__lowercase , **__lowercase , )
with open(__lowercase , encoding="""utf-8""" ) as vocab_handle:
UpperCAmelCase__ = json.load(__lowercase )
UpperCAmelCase__ = {v: k for k, v in self.encoder.items()}
UpperCAmelCase__ = errors # how to handle errors in decoding
UpperCAmelCase__ = bytes_to_unicode()
UpperCAmelCase__ = {v: k for k, v in self.byte_encoder.items()}
with open(__lowercase , encoding="""utf-8""" ) as merges_handle:
UpperCAmelCase__ = merges_handle.read().split("""\n""" )[1:-1]
UpperCAmelCase__ = [tuple(merge.split() ) for merge in bpe_merges]
UpperCAmelCase__ = dict(zip(__lowercase , range(len(__lowercase ) ) ) )
UpperCAmelCase__ = {}
UpperCAmelCase__ = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
UpperCAmelCase__ = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
def A__ ( self ):
return len(self.encoder )
def A__ ( self ):
return dict(self.encoder , **self.added_tokens_encoder )
def A__ ( self , __lowercase ):
if token in self.cache:
return self.cache[token]
UpperCAmelCase__ = tuple(__lowercase )
UpperCAmelCase__ = get_pairs(__lowercase )
if not pairs:
return token
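        # Greedily apply the lowest-ranked (earliest-learned) BPE merge until no
        # known merge remains or the word has collapsed into a single symbol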
while True:
UpperCAmelCase__ = min(__lowercase , key=lambda __lowercase : self.bpe_ranks.get(__lowercase , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
UpperCAmelCase__ , UpperCAmelCase__ = bigram
UpperCAmelCase__ = []
UpperCAmelCase__ = 0
while i < len(__lowercase ):
try:
UpperCAmelCase__ = word.index(__lowercase , __lowercase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
UpperCAmelCase__ = j
if word[i] == first and i < len(__lowercase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
UpperCAmelCase__ = tuple(__lowercase )
UpperCAmelCase__ = new_word
if len(__lowercase ) == 1:
break
else:
UpperCAmelCase__ = get_pairs(__lowercase )
UpperCAmelCase__ = """ """.join(__lowercase )
UpperCAmelCase__ = word
return word
def A__ ( self , __lowercase ):
UpperCAmelCase__ = []
for token in re.findall(self.pat , __lowercase ):
UpperCAmelCase__ = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__lowercase ).split(""" """ ) )
return bpe_tokens
def A__ ( self , __lowercase ):
return self.encoder.get(__lowercase , self.encoder.get(self.unk_token ) )
def A__ ( self , __lowercase ):
return self.decoder.get(__lowercase )
def A__ ( self , __lowercase ):
UpperCAmelCase__ = """""".join(__lowercase )
UpperCAmelCase__ = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors )
return text
def A__ ( self , __lowercase , __lowercase = None ):
if not os.path.isdir(__lowercase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCAmelCase__ = os.path.join(
__lowercase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
UpperCAmelCase__ = os.path.join(
__lowercase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(__lowercase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowercase , ensure_ascii=__lowercase ) + """\n""" )
UpperCAmelCase__ = 0
with open(__lowercase , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
""" Please check that the tokenizer is not corrupted!""" )
UpperCAmelCase__ = token_index
writer.write(""" """.join(__lowercase ) + """\n""" )
index += 1
return vocab_file, merge_file
def A__ ( self , __lowercase , __lowercase = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase__ = [self.cls_token_id]
UpperCAmelCase__ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def A__ ( self , __lowercase , __lowercase = None , __lowercase = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowercase , token_ids_a=__lowercase , already_has_special_tokens=__lowercase )
if token_ids_a is None:
return [1] + ([0] * len(__lowercase )) + [1]
return [1] + ([0] * len(__lowercase )) + [1, 1] + ([0] * len(__lowercase )) + [1]
def A__ ( self , __lowercase , __lowercase = None ):
UpperCAmelCase__ = [self.sep_token_id]
UpperCAmelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def A__ ( self , __lowercase , __lowercase=False , **__lowercase ):
UpperCAmelCase__ = kwargs.pop("""add_prefix_space""" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(__lowercase ) > 0 and not text[0].isspace()):
UpperCAmelCase__ = """ """ + text
return (text, kwargs)
| 422
| 0
|
"""simple docstring"""
import heapq
def lowerCamelCase__ ( __snake_case ) -> set[int]:
"""simple docstring"""
_UpperCamelCase = []
    # for each node and its adjacency list, push them together with the node's rank
    # onto the queue; heapq implements a min-priority queue, so -1 * len(v) is used
    # to make it behave like a max-priority queue
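    # e.g. a node with 3 neighbours is pushed as [-3, (key, value)], so the
    # min-heap pops the highest-degree node first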
for key, value in graph.items():
# O(log(n))
heapq.heappush(__snake_case, [-1 * len(__snake_case ), (key, value)] )
# chosen_vertices = set of chosen vertices
_UpperCamelCase = set()
    # while the queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the highest-degree node)
while queue and queue[0][0] != 0:
# extract vertex with max rank from queue and add it to chosen_vertices
_UpperCamelCase = heapq.heappop(__snake_case )[1][0]
chosen_vertices.add(__snake_case )
# Remove all arcs adjacent to argmax
for elem in queue:
            # if the vertex has no adjacent nodes left, skip it
if elem[0] == 0:
continue
            # if argmax is reachable from elem,
            # remove argmax from elem's adjacency list and update its rank
if argmax in elem[1][1]:
_UpperCamelCase = elem[1][1].index(__snake_case )
del elem[1][1][index]
elem[0] += 1
# re-order the queue
heapq.heapify(__snake_case )
return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
_a = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
| 19
|
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class a__ :
def __UpperCamelCase ( self : List[Any],_A : Dict,_A : str,_A : Any ):
"""simple docstring"""
return None
class a__ :
def __UpperCamelCase ( self : Tuple,_A : Any,_A : Any,_A : str,_A : Optional[int] ):
"""simple docstring"""
return None
class a__ ( unittest.TestCase ):
A = [
# (model_name, model_kwargs)
('bert-base-cased', {}),
('gpt2', {'use_cache': False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(_A,"tf",12,**_A )
@require_torch
@slow
def __UpperCamelCase ( self : str ):
"""simple docstring"""
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(_A,"pt",12,**_A )
@require_torch
@slow
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
from transformers import BertModel
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
with NamedTemporaryFile(mode="w+t" ) as vocab_file:
vocab_file.write("\n".join(_A ) )
vocab_file.flush()
SCREAMING_SNAKE_CASE_ : Tuple = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
SCREAMING_SNAKE_CASE_ : str = BertModel(BertConfig(vocab_size=len(_A ) ) )
model.save_pretrained(_A )
self._test_export(_A,"pt",12,_A )
@require_tf
@slow
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
SCREAMING_SNAKE_CASE_ : List[str] = self._test_export(_A,"tf",12,**_A )
SCREAMING_SNAKE_CASE_ : Optional[int] = quantize(Path(_A ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(_A ).stat().st_size:
self.fail("Quantized model is bigger than initial ONNX model" )
@require_torch
@slow
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
SCREAMING_SNAKE_CASE_ : int = self._test_export(_A,"pt",12,**_A )
SCREAMING_SNAKE_CASE_ : Optional[Any] = quantize(_A )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(_A ).stat().st_size:
self.fail("Quantized model is bigger than initial ONNX model" )
def __UpperCamelCase ( self : List[str],_A : Optional[int],_A : Optional[int],_A : Optional[Any],_A : Dict=None,**_A : List[Any] ):
"""simple docstring"""
try:
# Compute path
with TemporaryDirectory() as tempdir:
SCREAMING_SNAKE_CASE_ : Dict = Path(_A ).joinpath("model.onnx" )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(_A,_A,_A,_A,_A,**_A )
return path
except Exception as e:
self.fail(_A )
@require_torch
@require_tokenizers
@slow
def __UpperCamelCase ( self : str ):
"""simple docstring"""
from transformers import BertModel
SCREAMING_SNAKE_CASE_ : Optional[int] = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random" ) )
SCREAMING_SNAKE_CASE_ : List[str] = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random" )
self._test_infer_dynamic_axis(_A,_A,"pt" )
@require_tf
@require_tokenizers
@slow
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
from transformers import TFBertModel
SCREAMING_SNAKE_CASE_ : List[Any] = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random" ) )
SCREAMING_SNAKE_CASE_ : Tuple = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random" )
self._test_infer_dynamic_axis(_A,_A,"tf" )
def __UpperCamelCase ( self : Union[str, Any],_A : Dict,_A : List[str],_A : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = FeatureExtractionPipeline(_A,_A )
SCREAMING_SNAKE_CASE_ : Optional[int] = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = infer_shapes(_A,_A )
# Assert all variables are present
self.assertEqual(len(_A ),len(_A ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3],_A )
self.assertSequenceEqual(variable_names[3:],_A )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name],{0: "batch", 1: "sequence"} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes["output_0"],{0: "batch", 1: "sequence"} )
self.assertDictEqual(shapes["output_1"],{0: "batch"} )
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ["input_ids", "attention_mask", "token_type_ids"]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = ensure_valid_input(FuncContiguousArgs(),_A,_A )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(_A ),3 )
# Should have exactly the same input names
self.assertEqual(set(_A ),set(_A ) )
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(_A,(tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]) )
# Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = ensure_valid_input(FuncNonContiguousArgs(),_A,_A )
# Should have exactly the one arg (all before the one not provided "some_other_args")
self.assertEqual(len(_A ),1 )
self.assertEqual(len(_A ),1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0],tokens["input_ids"] )
self.assertEqual(ordered_input_names[0],"input_ids" )
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = generate_identified_filename(Path("/home/something/my_fake_model.onnx" ),"-test" )
self.assertEqual("/home/something/my_fake_model-test.onnx",generated.as_posix() )
| 216
| 0
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = "▁"
__UpperCAmelCase = {"vocab_file": "sentencepiece.bpe.model"}
__UpperCAmelCase = {
"vocab_file": {
"facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
}
}
__UpperCAmelCase = {
"facebook/xglm-564M": 2_0_4_8,
}
class a_( lowercase__ ):
"""simple docstring"""
__snake_case : str =VOCAB_FILES_NAMES
__snake_case : Tuple =PRETRAINED_VOCAB_FILES_MAP
__snake_case : Optional[int] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__snake_case : Any =['''input_ids''', '''attention_mask''']
def __init__( self : Dict , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : int="<s>" , lowerCAmelCase__ : Optional[Any]="</s>" , lowerCAmelCase__ : int="</s>" , lowerCAmelCase__ : Dict="<s>" , lowerCAmelCase__ : List[Any]="<unk>" , lowerCAmelCase__ : Any="<pad>" , lowerCAmelCase__ : Optional[Dict[str, Any]] = None , **lowerCAmelCase__ : List[Any] , ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
SCREAMING_SNAKE_CASE = 7
SCREAMING_SNAKE_CASE = [f'''<madeupword{i}>''' for i in range(self.num_madeup_words)]
SCREAMING_SNAKE_CASE = kwargs.get('additional_special_tokens' , [])
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase__ , )
SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(str(lowerCAmelCase__))
SCREAMING_SNAKE_CASE = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
SCREAMING_SNAKE_CASE = 1
        # Mimic fairseq token-to-id alignment for the first 4 tokens
SCREAMING_SNAKE_CASE = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
SCREAMING_SNAKE_CASE = len(self.sp_model)
SCREAMING_SNAKE_CASE = {f'''<madeupword{i}>''': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words)}
self.fairseq_tokens_to_ids.update(lowerCAmelCase__)
SCREAMING_SNAKE_CASE = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : Union[str, Any]) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.__dict__.copy()
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Any , lowerCAmelCase__ : Optional[Any]) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs'):
SCREAMING_SNAKE_CASE = {}
SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
def __UpperCamelCase ( self : Tuple , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
SCREAMING_SNAKE_CASE = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def __UpperCamelCase ( self : List[str] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None , lowerCAmelCase__ : bool = False) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase__ , token_ids_a=lowerCAmelCase__ , already_has_special_tokens=lowerCAmelCase__)
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase__))
return [1] + ([0] * len(lowerCAmelCase__)) + [1, 1] + ([0] * len(lowerCAmelCase__))
def __UpperCamelCase ( self : Dict , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None) -> List[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a) * [0]
@property
def __UpperCamelCase ( self : List[str]) -> Any:
"""simple docstring"""
return len(self.sp_model) + self.fairseq_offset + self.num_madeup_words
def __UpperCamelCase ( self : List[str]) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE = {self.convert_ids_to_tokens(lowerCAmelCase__): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __UpperCamelCase ( self : List[str] , lowerCAmelCase__ : str) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(lowerCAmelCase__ , out_type=lowerCAmelCase__)
def __UpperCamelCase ( self : Optional[Any] , lowerCAmelCase__ : Optional[int]) -> Optional[Any]:
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
SCREAMING_SNAKE_CASE = self.sp_model.PieceToId(lowerCAmelCase__)
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __UpperCamelCase ( self : Union[str, Any] , lowerCAmelCase__ : Any) -> Any:
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset)
def __UpperCamelCase ( self : Dict , lowerCAmelCase__ : Optional[int]) -> Optional[int]:
"""simple docstring"""
        SCREAMING_SNAKE_CASE = ''.join(lowerCAmelCase__).replace('▁' , ' ').strip()  # replace the SentencePiece underline with a space
return out_string
def __UpperCamelCase ( self : Dict , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[str] = None) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(lowerCAmelCase__):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
return
SCREAMING_SNAKE_CASE = os.path.join(
lowerCAmelCase__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCAmelCase__) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , lowerCAmelCase__)
elif not os.path.isfile(self.vocab_file):
with open(lowerCAmelCase__ , 'wb') as fi:
SCREAMING_SNAKE_CASE = self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase__)
return (out_vocab_file,)
| 259
|
import random
def A_ ( lowercase_ ) ->bool:
"""simple docstring"""
SCREAMING_SNAKE_CASE = num - 1
SCREAMING_SNAKE_CASE = 0
while s % 2 == 0:
SCREAMING_SNAKE_CASE = s // 2
t += 1
for _ in range(5 ):
SCREAMING_SNAKE_CASE = random.randrange(2 , num - 1 )
SCREAMING_SNAKE_CASE = pow(lowercase_ , lowercase_ , lowercase_ )
if v != 1:
SCREAMING_SNAKE_CASE = 0
while v != (num - 1):
if i == t - 1:
return False
else:
SCREAMING_SNAKE_CASE = i + 1
SCREAMING_SNAKE_CASE = (v**2) % num
return True
def A_ ( lowercase_ ) ->bool:
"""simple docstring"""
if num < 2:
return False
    SCREAMING_SNAKE_CASE = [
        2, 3, 5, 7, 1_1, 1_3, 1_7, 1_9, 2_3, 2_9,
        3_1, 3_7, 4_1, 4_3, 4_7, 5_3, 5_9, 6_1, 6_7, 7_1,
        7_3, 7_9, 8_3, 8_9, 9_7, 1_0_1, 1_0_3, 1_0_7, 1_0_9, 1_1_3,
        1_2_7, 1_3_1, 1_3_7, 1_3_9, 1_4_9, 1_5_1, 1_5_7, 1_6_3, 1_6_7, 1_7_3,
        1_7_9, 1_8_1, 1_9_1, 1_9_3, 1_9_7, 1_9_9, 2_1_1, 2_2_3, 2_2_7, 2_2_9,
        2_3_3, 2_3_9, 2_4_1, 2_5_1, 2_5_7, 2_6_3, 2_6_9, 2_7_1, 2_7_7, 2_8_1,
        2_8_3, 2_9_3, 3_0_7, 3_1_1, 3_1_3, 3_1_7, 3_3_1, 3_3_7, 3_4_7, 3_4_9,
        3_5_3, 3_5_9, 3_6_7, 3_7_3, 3_7_9, 3_8_3, 3_8_9, 3_9_7, 4_0_1, 4_0_9,
        4_1_9, 4_2_1, 4_3_1, 4_3_3, 4_3_9, 4_4_3, 4_4_9, 4_5_7, 4_6_1, 4_6_3,
        4_6_7, 4_7_9, 4_8_7, 4_9_1, 4_9_9, 5_0_3, 5_0_9, 5_2_1, 5_2_3, 5_4_1,
        5_4_7, 5_5_7, 5_6_3, 5_6_9, 5_7_1, 5_7_7, 5_8_7, 5_9_3, 5_9_9, 6_0_1,
        6_0_7, 6_1_3, 6_1_7, 6_1_9, 6_3_1, 6_4_1, 6_4_3, 6_4_7, 6_5_3, 6_5_9,
        6_6_1, 6_7_3, 6_7_7, 6_8_3, 6_9_1, 7_0_1, 7_0_9, 7_1_9, 7_2_7, 7_3_3,
        7_3_9, 7_4_3, 7_5_1, 7_5_7, 7_6_1, 7_6_9, 7_7_3, 7_8_7, 7_9_7, 8_0_9,
        8_1_1, 8_2_1, 8_2_3, 8_2_7, 8_2_9, 8_3_9, 8_5_3, 8_5_7, 8_5_9, 8_6_3,
        8_7_7, 8_8_1, 8_8_3, 8_8_7, 9_0_7, 9_1_1, 9_1_9, 9_2_9, 9_3_7, 9_4_1,
        9_4_7, 9_5_3, 9_6_7, 9_7_1, 9_7_7, 9_8_3, 9_9_1, 9_9_7,
    ]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
return rabin_miller(lowercase_ )
def A_ ( lowercase_ = 1_0_2_4 ) ->int:
"""simple docstring"""
while True:
SCREAMING_SNAKE_CASE = random.randrange(2 ** (keysize - 1) , 2 ** (keysize) )
if is_prime_low_num(lowercase_ ):
return num
if __name__ == "__main__":
__UpperCAmelCase = generate_large_prime()
print(("Prime number:", num))
print(("is_prime_low_num:", is_prime_low_num(num)))
| 259
| 1
|
'''simple docstring'''
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def snake_case_ (UpperCamelCase : Dict[str, torch.Tensor] ):
'''simple docstring'''
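    # Build per-residue lookup tables that map between the compact 14-atom and the
    # full 37-atom representations of each residue type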
_a = []
_a = []
_a = []
for rt in rc.restypes:
_a = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] )
_a = {name: i for i, name in enumerate(UpperCamelCase )}
restype_atomaa_to_atomaa_list.append(
[(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types] )
restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names] )
# Add dummy mapping for restype 'UNK'
restype_atomaa_to_atomaa_list.append([0] * 14 )
restype_atomaa_to_atomaa_list.append([0] * 37 )
restype_atomaa_mask_list.append([0.0] * 14 )
_a = torch.tensor(
UpperCamelCase , dtype=torch.intaa , device=protein['''aatype'''].device , )
_a = torch.tensor(
UpperCamelCase , dtype=torch.intaa , device=protein['''aatype'''].device , )
_a = torch.tensor(
UpperCamelCase , dtype=torch.floataa , device=protein['''aatype'''].device , )
_a = protein['''aatype'''].to(torch.long )
# create the mapping for (residx, atom14) --> atom37, i.e. an array
# with shape (num_res, 14) containing the atom37 indices for this protein
_a = restype_atomaa_to_atomaa[protein_aatype]
_a = restype_atomaa_mask[protein_aatype]
_a = residx_atomaa_mask
_a = residx_atomaa_to_atomaa.long()
# create the gather indices for mapping back
_a = restype_atomaa_to_atomaa[protein_aatype]
_a = residx_atomaa_to_atomaa.long()
# create the corresponding mask
_a = torch.zeros([21, 37] , dtype=torch.floataa , device=protein['''aatype'''].device )
for restype, restype_letter in enumerate(rc.restypes ):
_a = rc.restype_atoa[restype_letter]
_a = rc.residue_atoms[restype_name]
for atom_name in atom_names:
_a = rc.atom_order[atom_name]
_a = 1
_a = restype_atomaa_mask[protein_aatype]
_a = residx_atomaa_mask
return protein
def snake_case_ (UpperCamelCase : Dict[str, torch.Tensor] ):
'''simple docstring'''
_a = tree_map(lambda UpperCamelCase : torch.tensor(UpperCamelCase , device=batch['''aatype'''].device ) , UpperCamelCase , np.ndarray )
_a = tensor_tree_map(lambda UpperCamelCase : np.array(UpperCamelCase ) , make_atomaa_masks(UpperCamelCase ) )
return out
| 22
|
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class A :
lowercase_ = 42
lowercase_ = 42
class A :
def __init__( self : Optional[Any] , lowerCAmelCase_ : int ) -> str:
"""simple docstring"""
_a = [[] for _ in range(lowerCAmelCase_ )]
_a = size
def __getitem__( self : Any , lowerCAmelCase_ : int ) -> Iterator[Edge]:
"""simple docstring"""
return iter(self._graph[vertex] )
@property
def __lowerCAmelCase ( self : str ) -> Tuple:
"""simple docstring"""
return self._size
def __lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : int ) -> Dict:
"""simple docstring"""
if weight not in (0, 1):
raise ValueError('''Edge weight must be either 0 or 1.''' )
if to_vertex < 0 or to_vertex >= self.size:
raise ValueError('''Vertex indexes must be in [0; size).''' )
self._graph[from_vertex].append(Edge(lowerCAmelCase_ , lowerCAmelCase_ ) )
def __lowerCAmelCase ( self : Tuple , lowerCAmelCase_ : int , lowerCAmelCase_ : int ) -> int | None:
"""simple docstring"""
_a = deque([start_vertex] )
_a = [None] * self.size
_a = 0
while queue:
_a = queue.popleft()
_a = distances[current_vertex]
if current_distance is None:
continue
for edge in self[current_vertex]:
_a = current_distance + edge.weight
_a = distances[edge.destination_vertex]
if (
isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
and new_distance >= dest_vertex_distance
):
continue
_a = new_distance
if edge.weight == 0:
queue.appendleft(edge.destination_vertex )
else:
queue.append(edge.destination_vertex )
if distances[finish_vertex] is None:
raise ValueError('''No path from start_vertex to finish_vertex.''' )
return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 22
| 1
|
"""simple docstring"""
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class _UpperCAmelCase ( lowerCAmelCase__):
def __init__( self : List[Any] , *lowercase_ : List[Any] , lowercase_ : Optional[Any]=None , lowercase_ : Tuple=None , **lowercase_ : Union[str, Any] ):
super().__init__(*lowercase_ , **lowercase_ )
snake_case_ : str = eval_examples
snake_case_ : Optional[Any] = post_process_function
def _snake_case ( self : Union[str, Any] , lowercase_ : Optional[Dataset] = None , lowercase_ : str=None , lowercase_ : Optional[List[str]] = None , lowercase_ : str = "eval" , **lowercase_ : Dict , ):
snake_case_ : int = gen_kwargs.copy()
snake_case_ : Dict = (
gen_kwargs['''max_length'''] if gen_kwargs.get('''max_length''' ) is not None else self.args.generation_max_length
)
snake_case_ : Tuple = (
gen_kwargs['''num_beams'''] if gen_kwargs.get('''num_beams''' ) is not None else self.args.generation_num_beams
)
snake_case_ : Optional[Any] = gen_kwargs
snake_case_ : Optional[int] = self.eval_dataset if eval_dataset is None else eval_dataset
snake_case_ : Optional[Any] = self.get_eval_dataloader(lowercase_ )
snake_case_ : Dict = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
snake_case_ : Optional[int] = self.compute_metrics
snake_case_ : str = None
snake_case_ : int = time.time()
snake_case_ : Any = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
snake_case_ : str = eval_loop(
lowercase_ , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowercase_ , metric_key_prefix=lowercase_ , )
finally:
snake_case_ : Optional[int] = compute_metrics
snake_case_ : Dict = self.args.eval_batch_size * self.args.world_size
if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
output.metrics.update(
speed_metrics(
lowercase_ , lowercase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
snake_case_ : Tuple = self.post_process_function(lowercase_ , lowercase_ , lowercase_ )
snake_case_ : Optional[int] = self.compute_metrics(lowercase_ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f"{metric_key_prefix}_" ):
snake_case_ : int = metrics.pop(lowercase_ )
metrics.update(output.metrics )
else:
snake_case_ : Tuple = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(lowercase_ )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
snake_case_ : str = self.callback_handler.on_evaluate(self.args , self.state , self.control , lowercase_ )
return metrics
def _snake_case ( self : Any , lowercase_ : int , lowercase_ : List[str] , lowercase_ : str=None , lowercase_ : str = "test" , **lowercase_ : Dict ):
snake_case_ : Dict = gen_kwargs.copy()
snake_case_ : Dict = self.get_test_dataloader(lowercase_ )
# Temporarily disable metric computation, we will do it in the loop here.
snake_case_ : Tuple = self.compute_metrics
snake_case_ : int = None
snake_case_ : List[Any] = time.time()
snake_case_ : int = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
snake_case_ : List[str] = eval_loop(
lowercase_ , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowercase_ , metric_key_prefix=lowercase_ , )
finally:
snake_case_ : str = compute_metrics
snake_case_ : int = self.args.eval_batch_size * self.args.world_size
if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
output.metrics.update(
speed_metrics(
lowercase_ , lowercase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
snake_case_ : Tuple = self.post_process_function(lowercase_ , lowercase_ , lowercase_ , '''predict''' )
snake_case_ : Dict = self.compute_metrics(lowercase_ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f"{metric_key_prefix}_" ):
snake_case_ : Union[str, Any] = metrics.pop(lowercase_ )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=lowercase_ )
| 485
|
"""simple docstring"""
def __lowercase ( _a ):
snake_case_ : Optional[Any] = int(_a )
if decimal in (0, 1): # Exit cases for the recursion
return str(_a )
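    # Recurse on the quotient; the remainder becomes the least-significant bit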
    snake_case_, snake_case_ = divmod(_a , 2 )
return binary_recursive(_a ) + str(_a )
def __lowercase ( _a ):
snake_case_ : Dict = str(_a ).strip()
if not number:
raise ValueError('''No input value was provided''' )
snake_case_ : List[str] = '''-''' if number.startswith('''-''' ) else ''''''
snake_case_ : Optional[Any] = number.lstrip('''-''' )
if not number.isnumeric():
raise ValueError('''Input value is not an integer''' )
return f"{negative}0b{binary_recursive(int(_a ) )}"
if __name__ == "__main__":
from doctest import testmod
testmod()
| 485
| 1
|
'''simple docstring'''
def UpperCAmelCase__ ( UpperCAmelCase_ : list , UpperCAmelCase_ : list , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int ) -> int:
if index == number_of_items:
return 0
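    # Branch on item `index`: either leave it out, or take it if it still fits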
__lowerCamelCase : List[str] = 0
__lowerCamelCase : List[str] = 0
__lowerCamelCase : Any = knapsack(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , index + 1 )
if weights[index] <= max_weight:
__lowerCamelCase : Tuple = values[index] + knapsack(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , max_weight - weights[index] , index + 1 )
return max(UpperCAmelCase_ , UpperCAmelCase_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 13
|
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
_lowerCAmelCase : Dict = logging.get_logger(__name__)
class __magic_name__ ( lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE = ['pixel_values']
def __init__( self , __snake_case = True , __snake_case = None , __snake_case = PILImageResampling.BILINEAR , __snake_case = True , __snake_case = None , __snake_case = True , __snake_case = 1 / 255 , __snake_case = True , __snake_case = None , __snake_case = None , **__snake_case , ) -> None:
'''simple docstring'''
super().__init__(**__snake_case )
__a =size if size is not None else {'shortest_edge': 256}
__a =get_size_dict(__snake_case , default_to_square=__snake_case )
__a =crop_size if crop_size is not None else {'height': 224, 'width': 224}
__a =get_size_dict(__snake_case , param_name='crop_size' )
__a =do_resize
__a =size
__a =resample
__a =do_center_crop
__a =crop_size
__a =do_rescale
__a =rescale_factor
__a =do_normalize
__a =image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__a =image_std if image_std is not None else IMAGENET_STANDARD_STD
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case = PILImageResampling.BICUBIC , __snake_case = None , **__snake_case , ) -> np.ndarray:
'''simple docstring'''
__a =get_size_dict(__snake_case , default_to_square=__snake_case )
if "shortest_edge" not in size:
raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
__a =get_resize_output_image_size(__snake_case , size=size['shortest_edge'] , default_to_square=__snake_case )
return resize(__snake_case , size=__snake_case , resample=__snake_case , data_format=__snake_case , **__snake_case )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case = None , **__snake_case , ) -> np.ndarray:
'''simple docstring'''
__a =get_size_dict(__snake_case )
if "height" not in size or "width" not in size:
raise ValueError(f'The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}' )
return center_crop(__snake_case , size=(size['height'], size['width']) , data_format=__snake_case , **__snake_case )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case = None , **__snake_case ) -> np.ndarray:
'''simple docstring'''
return rescale(__snake_case , scale=__snake_case , data_format=__snake_case , **__snake_case )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case = None , **__snake_case , ) -> np.ndarray:
'''simple docstring'''
return normalize(__snake_case , mean=__snake_case , std=__snake_case , data_format=__snake_case , **__snake_case )
def __magic_name__ ( self , __snake_case , __snake_case = None , __snake_case = None , __snake_case = None , __snake_case = None , __snake_case = None , __snake_case = None , __snake_case = None , __snake_case = None , __snake_case = None , __snake_case = None , __snake_case = None , __snake_case = ChannelDimension.FIRST , **__snake_case , ) -> str:
'''simple docstring'''
__a =do_resize if do_resize is not None else self.do_resize
__a =size if size is not None else self.size
__a =get_size_dict(__snake_case , default_to_square=__snake_case )
__a =resample if resample is not None else self.resample
__a =do_center_crop if do_center_crop is not None else self.do_center_crop
__a =crop_size if crop_size is not None else self.crop_size
__a =get_size_dict(__snake_case , param_name='crop_size' )
__a =do_rescale if do_rescale is not None else self.do_rescale
__a =rescale_factor if rescale_factor is not None else self.rescale_factor
__a =do_normalize if do_normalize is not None else self.do_normalize
__a =image_mean if image_mean is not None else self.image_mean
__a =image_std if image_std is not None else self.image_std
__a =make_list_of_images(__snake_case )
if not valid_images(__snake_case ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
__a =[to_numpy_array(__snake_case ) for image in images]
if do_resize:
__a =[self.resize(image=__snake_case , size=__snake_case , resample=__snake_case ) for image in images]
if do_center_crop:
__a =[self.center_crop(image=__snake_case , size=__snake_case ) for image in images]
if do_rescale:
__a =[self.rescale(image=__snake_case , scale=__snake_case ) for image in images]
if do_normalize:
__a =[self.normalize(image=__snake_case , mean=__snake_case , std=__snake_case ) for image in images]
__a =[to_channel_dimension_format(__snake_case , __snake_case ) for image in images]
__a ={'pixel_values': images}
return BatchFeature(data=__snake_case , tensor_type=__snake_case )
def __magic_name__ ( self , __snake_case , __snake_case = None ) -> Tuple:
'''simple docstring'''
__a =outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(__snake_case ) != len(__snake_case ):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits' )
if is_torch_tensor(__snake_case ):
__a =target_sizes.numpy()
__a =[]
for idx in range(len(__snake_case ) ):
__a =torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='bilinear' , align_corners=__snake_case )
__a =resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(__snake_case )
else:
__a =logits.argmax(dim=1 )
__a =[semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 242
| 0
|
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
lowerCamelCase :Dict = logging.get_logger(__name__)
lowerCamelCase :str = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCamelCase :List[Any] = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
lowerCamelCase :List[Any] = {
'''allenai/led-base-16384''': 1_6_3_8_4,
}
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : List[str] = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE : str = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE : int = LEDTokenizer
__SCREAMING_SNAKE_CASE : List[Any] = ['input_ids', 'attention_mask']
def __init__(self , lowercase=None , lowercase=None , lowercase=None , lowercase="replace" , lowercase="<s>" , lowercase="</s>" , lowercase="</s>" , lowercase="<s>" , lowercase="<unk>" , lowercase="<pad>" , lowercase="<mask>" , lowercase=False , lowercase=True , **lowercase , ):
super().__init__(
lowercase , lowercase , tokenizer_file=lowercase , errors=lowercase , bos_token=lowercase , eos_token=lowercase , sep_token=lowercase , cls_token=lowercase , unk_token=lowercase , pad_token=lowercase , mask_token=lowercase , add_prefix_space=lowercase , trim_offsets=lowercase , **lowercase , )
A_ : Any = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , lowercase ) != add_prefix_space:
A_ : Dict = getattr(lowercase , pre_tok_state.pop("""type""" ) )
A_ : List[Any] = add_prefix_space
A_ : Dict = pre_tok_class(**lowercase )
A_ : Any = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
A_ : Any = """post_processor"""
A_ : Dict = getattr(self.backend_tokenizer , lowercase , lowercase )
if tokenizer_component_instance:
A_ : Optional[Any] = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
A_ : Optional[int] = tuple(state["""sep"""] )
if "cls" in state:
A_ : List[str] = tuple(state["""cls"""] )
A_ : List[Any] = False
if state.get("""add_prefix_space""" , lowercase ) != add_prefix_space:
A_ : Optional[Any] = add_prefix_space
A_ : Optional[Any] = True
if state.get("""trim_offsets""" , lowercase ) != trim_offsets:
A_ : List[Any] = trim_offsets
A_ : Any = True
if changes_to_apply:
A_ : List[Any] = getattr(lowercase , state.pop("""type""" ) )
A_ : List[Any] = component_class(**lowercase )
setattr(self.backend_tokenizer , lowercase , lowercase )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def _a (self ):
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""" )
return None
return str(self._mask_token )
@mask_token.setter
def _a (self , lowercase ):
A_ : int = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase ) if isinstance(lowercase , lowercase ) else value
A_ : List[Any] = value
def _a (self , *lowercase , **lowercase ):
A_ : List[str] = kwargs.get("""is_split_into_words""" , lowercase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"""to use it with pretokenized inputs.""" )
return super()._batch_encode_plus(*lowercase , **lowercase )
def _a (self , *lowercase , **lowercase ):
A_ : Optional[Any] = kwargs.get("""is_split_into_words""" , lowercase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"""to use it with pretokenized inputs.""" )
return super()._encode_plus(*lowercase , **lowercase )
def _a (self , lowercase , lowercase = None ):
A_ : int = self._tokenizer.model.save(lowercase , name=lowercase )
return tuple(lowercase )
def _a (self , lowercase , lowercase=None ):
A_ : Dict = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def _a (self , lowercase , lowercase = None ):
A_ : Dict = [self.sep_token_id]
A_ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
    def _pad(self , lowercase , lowercase = None , lowercase = PaddingStrategy.DO_NOT_PAD , lowercase = None , lowercase = None , ):
A_ : Optional[int] = super()._pad(
encoded_inputs=lowercase , max_length=lowercase , padding_strategy=lowercase , pad_to_multiple_of=lowercase , return_attention_mask=lowercase , )
# Load from model defaults
if return_attention_mask is None:
A_ : List[Any] = """attention_mask""" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
A_ : Any = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as other (sequential) inputs.
A_ : Union[str, Any] = len(encoded_inputs["""global_attention_mask"""] ) != len(lowercase )
if needs_to_be_padded:
A_ : Dict = len(lowercase ) - len(encoded_inputs["""global_attention_mask"""] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
A_ : Dict = (
encoded_inputs["""global_attention_mask"""] + [-1] * difference
)
elif self.padding_side == "left":
A_ : Union[str, Any] = [-1] * difference + encoded_inputs[
"""global_attention_mask"""
]
else:
raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
return encoded_inputs
| 686
|
'''simple docstring'''
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger('''transformers.models.encodec''')
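# The dictionaries below map original EnCodec state-dict keys to the Transformers
# naming scheme; a `*` in a key stands for a wildcarded layer index.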
MAPPING_QUANTIZER = {
'''quantizer.vq.layers.*._codebook.inited''': '''quantizer.layers.*.codebook.inited''',
'''quantizer.vq.layers.*._codebook.cluster_size''': '''quantizer.layers.*.codebook.cluster_size''',
'''quantizer.vq.layers.*._codebook.embed''': '''quantizer.layers.*.codebook.embed''',
'''quantizer.vq.layers.*._codebook.embed_avg''': '''quantizer.layers.*.codebook.embed_avg''',
}
MAPPING_ENCODER = {
'''encoder.model.0.conv.conv''': '''encoder.layers.0.conv''',
'''encoder.model.1.block.1.conv.conv''': '''encoder.layers.1.block.1.conv''',
'''encoder.model.1.block.3.conv.conv''': '''encoder.layers.1.block.3.conv''',
'''encoder.model.1.shortcut.conv.conv''': '''encoder.layers.1.shortcut.conv''',
'''encoder.model.3.conv.conv''': '''encoder.layers.3.conv''',
'''encoder.model.4.block.1.conv.conv''': '''encoder.layers.4.block.1.conv''',
'''encoder.model.4.block.3.conv.conv''': '''encoder.layers.4.block.3.conv''',
'''encoder.model.4.shortcut.conv.conv''': '''encoder.layers.4.shortcut.conv''',
'''encoder.model.6.conv.conv''': '''encoder.layers.6.conv''',
'''encoder.model.7.block.1.conv.conv''': '''encoder.layers.7.block.1.conv''',
'''encoder.model.7.block.3.conv.conv''': '''encoder.layers.7.block.3.conv''',
'''encoder.model.7.shortcut.conv.conv''': '''encoder.layers.7.shortcut.conv''',
'''encoder.model.9.conv.conv''': '''encoder.layers.9.conv''',
'''encoder.model.10.block.1.conv.conv''': '''encoder.layers.10.block.1.conv''',
'''encoder.model.10.block.3.conv.conv''': '''encoder.layers.10.block.3.conv''',
'''encoder.model.10.shortcut.conv.conv''': '''encoder.layers.10.shortcut.conv''',
'''encoder.model.12.conv.conv''': '''encoder.layers.12.conv''',
'''encoder.model.13.lstm''': '''encoder.layers.13.lstm''',
'''encoder.model.15.conv.conv''': '''encoder.layers.15.conv''',
}
MAPPING_ENCODER_48K = {
'''encoder.model.0.conv.norm''': '''encoder.layers.0.norm''',
'''encoder.model.1.block.1.conv.norm''': '''encoder.layers.1.block.1.norm''',
'''encoder.model.1.block.3.conv.norm''': '''encoder.layers.1.block.3.norm''',
'''encoder.model.1.shortcut.conv.norm''': '''encoder.layers.1.shortcut.norm''',
'''encoder.model.3.conv.norm''': '''encoder.layers.3.norm''',
'''encoder.model.4.block.1.conv.norm''': '''encoder.layers.4.block.1.norm''',
'''encoder.model.4.block.3.conv.norm''': '''encoder.layers.4.block.3.norm''',
'''encoder.model.4.shortcut.conv.norm''': '''encoder.layers.4.shortcut.norm''',
'''encoder.model.6.conv.norm''': '''encoder.layers.6.norm''',
'''encoder.model.7.block.1.conv.norm''': '''encoder.layers.7.block.1.norm''',
'''encoder.model.7.block.3.conv.norm''': '''encoder.layers.7.block.3.norm''',
'''encoder.model.7.shortcut.conv.norm''': '''encoder.layers.7.shortcut.norm''',
'''encoder.model.9.conv.norm''': '''encoder.layers.9.norm''',
'''encoder.model.10.block.1.conv.norm''': '''encoder.layers.10.block.1.norm''',
'''encoder.model.10.block.3.conv.norm''': '''encoder.layers.10.block.3.norm''',
'''encoder.model.10.shortcut.conv.norm''': '''encoder.layers.10.shortcut.norm''',
'''encoder.model.12.conv.norm''': '''encoder.layers.12.norm''',
'''encoder.model.15.conv.norm''': '''encoder.layers.15.norm''',
}
MAPPING_DECODER = {
'''decoder.model.0.conv.conv''': '''decoder.layers.0.conv''',
'''decoder.model.1.lstm''': '''decoder.layers.1.lstm''',
'''decoder.model.3.convtr.convtr''': '''decoder.layers.3.conv''',
'''decoder.model.4.block.1.conv.conv''': '''decoder.layers.4.block.1.conv''',
'''decoder.model.4.block.3.conv.conv''': '''decoder.layers.4.block.3.conv''',
'''decoder.model.4.shortcut.conv.conv''': '''decoder.layers.4.shortcut.conv''',
'''decoder.model.6.convtr.convtr''': '''decoder.layers.6.conv''',
'''decoder.model.7.block.1.conv.conv''': '''decoder.layers.7.block.1.conv''',
'''decoder.model.7.block.3.conv.conv''': '''decoder.layers.7.block.3.conv''',
'''decoder.model.7.shortcut.conv.conv''': '''decoder.layers.7.shortcut.conv''',
'''decoder.model.9.convtr.convtr''': '''decoder.layers.9.conv''',
'''decoder.model.10.block.1.conv.conv''': '''decoder.layers.10.block.1.conv''',
'''decoder.model.10.block.3.conv.conv''': '''decoder.layers.10.block.3.conv''',
'''decoder.model.10.shortcut.conv.conv''': '''decoder.layers.10.shortcut.conv''',
'''decoder.model.12.convtr.convtr''': '''decoder.layers.12.conv''',
'''decoder.model.13.block.1.conv.conv''': '''decoder.layers.13.block.1.conv''',
'''decoder.model.13.block.3.conv.conv''': '''decoder.layers.13.block.3.conv''',
'''decoder.model.13.shortcut.conv.conv''': '''decoder.layers.13.shortcut.conv''',
'''decoder.model.15.conv.conv''': '''decoder.layers.15.conv''',
}
MAPPING_DECODER_48K = {
'''decoder.model.0.conv.norm''': '''decoder.layers.0.norm''',
'''decoder.model.3.convtr.norm''': '''decoder.layers.3.norm''',
'''decoder.model.4.block.1.conv.norm''': '''decoder.layers.4.block.1.norm''',
'''decoder.model.4.block.3.conv.norm''': '''decoder.layers.4.block.3.norm''',
'''decoder.model.4.shortcut.conv.norm''': '''decoder.layers.4.shortcut.norm''',
'''decoder.model.6.convtr.norm''': '''decoder.layers.6.norm''',
'''decoder.model.7.block.1.conv.norm''': '''decoder.layers.7.block.1.norm''',
'''decoder.model.7.block.3.conv.norm''': '''decoder.layers.7.block.3.norm''',
'''decoder.model.7.shortcut.conv.norm''': '''decoder.layers.7.shortcut.norm''',
'''decoder.model.9.convtr.norm''': '''decoder.layers.9.norm''',
'''decoder.model.10.block.1.conv.norm''': '''decoder.layers.10.block.1.norm''',
'''decoder.model.10.block.3.conv.norm''': '''decoder.layers.10.block.3.norm''',
'''decoder.model.10.shortcut.conv.norm''': '''decoder.layers.10.shortcut.norm''',
'''decoder.model.12.convtr.norm''': '''decoder.layers.12.norm''',
'''decoder.model.13.block.1.conv.norm''': '''decoder.layers.13.block.1.norm''',
'''decoder.model.13.block.3.conv.norm''': '''decoder.layers.13.block.3.norm''',
'''decoder.model.13.shortcut.conv.norm''': '''decoder.layers.13.shortcut.norm''',
'''decoder.model.15.conv.norm''': '''decoder.layers.15.norm''',
}
MAPPING_24K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
MAPPING_48K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively( hf_pointer , key , value , full_name , weight_type ):
'''simple docstring'''
for attribute in key.split(""".""" ):
A_ : Optional[Any] = getattr(lowerCamelCase__ , lowerCamelCase__ )
if weight_type is not None:
A_ : Optional[int] = getattr(lowerCamelCase__ , lowerCamelCase__ ).shape
else:
A_ : Any = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}' )
if weight_type == "weight":
A_ : Optional[int] = value
elif weight_type == "weight_g":
A_ : Optional[int] = value
elif weight_type == "weight_v":
A_ : Dict = value
elif weight_type == "bias":
A_ : Dict = value
elif weight_type == "running_mean":
A_ : Optional[Any] = value
elif weight_type == "running_var":
A_ : int = value
elif weight_type == "num_batches_tracked":
A_ : Optional[Any] = value
elif weight_type == "weight_ih_l0":
A_ : Optional[int] = value
elif weight_type == "weight_hh_l0":
A_ : Union[str, Any] = value
elif weight_type == "bias_ih_l0":
A_ : Optional[int] = value
elif weight_type == "bias_hh_l0":
A_ : Tuple = value
elif weight_type == "weight_ih_l1":
A_ : Optional[int] = value
elif weight_type == "weight_hh_l1":
A_ : Dict = value
elif weight_type == "bias_ih_l1":
A_ : Optional[int] = value
elif weight_type == "bias_hh_l1":
A_ : Tuple = value
else:
A_ : Any = value
logger.info(f'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.' )
def should_ignore( name , ignore_keys ):
'''simple docstring'''
for key in ignore_keys:
if key.endswith(""".*""" ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
A_, A_ : List[str] = key.split(""".*.""" )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
def recursively_load_weights( orig_dict , hf_model , model_name ):
'''simple docstring'''
A_ : List[Any] = []
    if model_name in ("encodec_24khz", "encodec_32khz"):
A_ : List[str] = MAPPING_24K
elif model_name == "encodec_48khz":
A_ : str = MAPPING_48K
else:
raise ValueError(f'Unsupported model: {model_name}' )
for name, value in orig_dict.items():
if should_ignore(lowerCamelCase__ , lowerCamelCase__ ):
logger.info(f'{name} was ignored' )
continue
A_ : str = False
for key, mapped_key in MAPPING.items():
if "*" in key:
A_, A_ : List[Any] = key.split(""".*.""" )
if prefix in name and suffix in name:
A_ : Optional[Any] = suffix
if key in name:
# HACK otherwise .embed gets initialized with .embed_avg too
if key.endswith("""embed""" ) and name.endswith("""embed_avg""" ):
continue
A_ : Union[str, Any] = True
if "*" in mapped_key:
A_ : int = name.split(lowerCamelCase__ )[0].split(""".""" )[-2]
A_ : Optional[Any] = mapped_key.replace("""*""" , lowerCamelCase__ )
if "weight_g" in name:
A_ : Any = """weight_g"""
elif "weight_v" in name:
A_ : Tuple = """weight_v"""
elif "weight_ih_l0" in name:
A_ : Union[str, Any] = """weight_ih_l0"""
elif "weight_hh_l0" in name:
A_ : Tuple = """weight_hh_l0"""
elif "bias_ih_l0" in name:
A_ : str = """bias_ih_l0"""
elif "bias_hh_l0" in name:
A_ : List[Any] = """bias_hh_l0"""
elif "weight_ih_l1" in name:
A_ : Dict = """weight_ih_l1"""
elif "weight_hh_l1" in name:
A_ : Any = """weight_hh_l1"""
elif "bias_ih_l1" in name:
A_ : Optional[int] = """bias_ih_l1"""
elif "bias_hh_l1" in name:
A_ : List[Any] = """bias_hh_l1"""
elif "bias" in name:
A_ : List[str] = """bias"""
elif "weight" in name:
A_ : Optional[int] = """weight"""
elif "running_mean" in name:
A_ : Union[str, Any] = """running_mean"""
elif "running_var" in name:
A_ : Optional[int] = """running_var"""
elif "num_batches_tracked" in name:
A_ : List[Any] = """num_batches_tracked"""
else:
A_ : str = None
set_recursively(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
continue
if not is_used:
unused_weights.append(lowerCamelCase__ )
logger.warning(f'Unused weights: {unused_weights}' )
@torch.no_grad()
def convert_checkpoint( model_name , checkpoint_path , pytorch_dump_folder_path , config_path=None , repo_id=None , ):
'''simple docstring'''
if config_path is not None:
A_ : Any = EncodecConfig.from_pretrained(lowerCamelCase__ )
else:
A_ : Optional[int] = EncodecConfig()
if model_name == "encodec_24khz":
pass # config is already correct
elif model_name == "encodec_32khz":
A_ : Dict = [8, 5, 4, 4]
A_ : Optional[Any] = [2.2]
A_ : Tuple = 64
A_ : Tuple = 3_20_00
A_ : List[Any] = 20_48
A_ : Optional[Any] = False
A_ : str = False
A_ : Optional[int] = False
elif model_name == "encodec_48khz":
A_ : Dict = [8, 5, 4, 2]
A_ : Tuple = [3.0, 6.0, 12.0, 24.0]
A_ : List[Any] = 4_80_00
A_ : Dict = 2
A_ : Dict = False
A_ : Dict = """time_group_norm"""
A_ : Optional[Any] = True
A_ : str = 1.0
A_ : Any = 0.01
else:
raise ValueError(f'Unknown model name: {model_name}' )
A_ : Dict = EncodecModel(lowerCamelCase__ )
A_ : Any = EncodecFeatureExtractor(
feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
feature_extractor.save_pretrained(lowerCamelCase__ )
A_ : int = torch.load(lowerCamelCase__ )
if "best_state" in original_checkpoint:
# we might have a training state saved, in which case discard the yaml results and just retain the weights
A_ : Tuple = original_checkpoint["""best_state"""]
recursively_load_weights(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
model.save_pretrained(lowerCamelCase__ )
if repo_id:
print("""Pushing to the hub...""" )
feature_extractor.push_to_hub(lowerCamelCase__ )
model.push_to_hub(lowerCamelCase__ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model''',
default='''encodec_24khz''',
type=str,
help='''The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.''',
)
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
    args = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 686
| 1
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
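# Maps fairseq wav2vec2 state-dict keys to the Transformers naming scheme; a `*`
# in a value stands for a wildcarded layer index.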
MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''adapter_layer''': '''encoder.layers.*.adapter_layer''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
'''pooling_layer.linear''': '''projector''',
'''pooling_layer.projection''': '''classifier''',
}
TOP_LEVEL_KEYS = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
'''projector''',
'''classifier''',
]
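# Builds an id2label mapping from a labels file: {line number: first token on that line}.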
def read_txt_into_dict( filename ):
    """simple docstring"""
    result = {}
    with open(filename ,"""r""" ) as file:
        for line_number, line in enumerate(file ):
            line = line.strip()
            if line:
                words = line.split()
                result[line_number] = words[0]
    return result
def set_recursively( key ,value ,full_name ,weight_type ,hf_pointer ):
"""simple docstring"""
for attribute in key.split(""".""" ):
_lowercase = getattr(_A ,_A )
_lowercase = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(_A ):
_lowercase = PARAM_MAPPING[full_name.split(""".""" )[-1]]
_lowercase = 'param'
if weight_type is not None and weight_type != "param":
_lowercase = getattr(_A ,_A ).shape
elif weight_type is not None and weight_type == "param":
_lowercase = hf_pointer
for attribute in hf_param_name.split(""".""" ):
_lowercase = getattr(_A ,_A )
_lowercase = shape_pointer.shape
# let's reduce dimension
_lowercase = value[0]
else:
_lowercase = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}''' )
if weight_type == "weight":
_lowercase = value
elif weight_type == "weight_g":
_lowercase = value
elif weight_type == "weight_v":
_lowercase = value
elif weight_type == "bias":
_lowercase = value
elif weight_type == "param":
for attribute in hf_param_name.split(""".""" ):
_lowercase = getattr(_A ,_A )
_lowercase = value
else:
_lowercase = value
logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def rename_dict( key ,value ,full_name ,weight_type ,hf_dict ):
    """simple docstring"""
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key ):
            hf_param_name = PARAM_MAPPING[full_name.split(""".""" )[-1]]
            weight_type = "param"
    if weight_type is not None and weight_type != "param":
        full_key = ".".join([key, weight_type] )
    elif weight_type is not None and weight_type == "param":
        full_key = ".".join([key, hf_param_name] )
    else:
        full_key = key
    hf_dict[full_key] = value if "lm_head" in full_key else value[0]
PARAM_MAPPING = {
'''W_a''': '''linear_1.weight''',
'''W_b''': '''linear_2.weight''',
'''b_a''': '''linear_1.bias''',
'''b_b''': '''linear_2.bias''',
'''ln_W''': '''norm.weight''',
'''ln_b''': '''norm.bias''',
}
def load_wavaveca_layer( name ,value ,hf_model=None ,hf_dict=None ):
"""simple docstring"""
_lowercase = False
for key, mapped_key in MAPPING.items():
_lowercase = 'wav2vec2.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
_lowercase = True
if "*" in mapped_key:
_lowercase = name.split(_A )[0].split(""".""" )[-2]
_lowercase = mapped_key.replace("""*""" ,_A )
if "weight_g" in name:
_lowercase = 'weight_g'
elif "weight_v" in name:
_lowercase = 'weight_v'
elif "bias" in name:
_lowercase = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
_lowercase = 'weight'
else:
_lowercase = None
if hf_dict is not None:
rename_dict(_A ,_A ,_A ,_A ,_A )
else:
set_recursively(_A ,_A ,_A ,_A ,_A )
return is_used
return is_used
def recursively_load_weights( fairseq_model ,hf_model ,is_headless ):
"""simple docstring"""
_lowercase = []
_lowercase = fairseq_model.state_dict()
_lowercase = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
_lowercase = False
if "conv_layers" in name:
load_conv_layer(
_A ,_A ,_A ,_A ,hf_model.config.feat_extract_norm == """group""" ,)
_lowercase = True
else:
_lowercase = load_wavaveca_layer(_A ,_A ,_A )
if not is_used:
unused_weights.append(_A )
logger.warning(f'''Unused weights: {unused_weights}''' )
def load_conv_layer( full_name ,value ,feature_extractor ,unused_weights ,use_group_norm ):
"""simple docstring"""
_lowercase = full_name.split("""conv_layers.""" )[-1]
_lowercase = name.split(""".""" )
_lowercase = int(items[0] )
_lowercase = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
_lowercase = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
_lowercase = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
_lowercase = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
_lowercase = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(_A )
@torch.no_grad()
def convert_wavaveca_checkpoint( checkpoint_path ,pytorch_dump_folder_path ,config_path=None ,dict_path=None ,is_finetuned=True ,is_seq_class=False ):
"""simple docstring"""
if config_path is not None:
_lowercase = WavaVecaConfig.from_pretrained(_A )
else:
_lowercase = WavaVecaConfig()
if is_seq_class:
_lowercase = read_txt_into_dict(_A )
_lowercase = idalabel
_lowercase = WavaVecaForSequenceClassification(_A )
_lowercase = WavaVecaFeatureExtractor(
feature_size=1 ,sampling_rate=16_000 ,padding_value=0 ,do_normalize=_A ,return_attention_mask=_A ,)
feature_extractor.save_pretrained(_A )
elif is_finetuned:
if dict_path:
_lowercase = Dictionary.load(_A )
            # important: change the bos & pad token ids, since the CTC symbol is <pad> and
            # not <s> as in fairseq
_lowercase = target_dict.pad_index
_lowercase = target_dict.bos_index
_lowercase = target_dict.eos_index
_lowercase = len(target_dict.symbols )
_lowercase = os.path.join(_A ,"""vocab.json""" )
if not os.path.isdir(_A ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(_A ) )
return
os.makedirs(_A ,exist_ok=_A )
_lowercase = target_dict.indices
# fairseq has the <pad> and <s> switched
_lowercase = 0
_lowercase = 1
with open(_A ,"""w""" ,encoding="""utf-8""" ) as vocab_handle:
json.dump(_A ,_A )
_lowercase = WavaVecaCTCTokenizer(
_A ,unk_token=target_dict.unk_word ,pad_token=target_dict.pad_word ,bos_token=target_dict.bos_word ,eos_token=target_dict.eos_word ,word_delimiter_token="""|""" ,do_lower_case=_A ,)
_lowercase = True if config.feat_extract_norm == 'layer' else False
_lowercase = WavaVecaFeatureExtractor(
feature_size=1 ,sampling_rate=16_000 ,padding_value=0 ,do_normalize=_A ,return_attention_mask=_A ,)
_lowercase = WavaVecaProcessor(feature_extractor=_A ,tokenizer=_A )
processor.save_pretrained(_A )
_lowercase = WavaVecaForCTC(_A )
else:
_lowercase = WavaVecaForPreTraining(_A )
if is_finetuned or is_seq_class:
_lowercase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] ,arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
_lowercase = argparse.Namespace(task="""audio_pretraining""" )
_lowercase = fairseq.tasks.setup_task(_A )
_lowercase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ,task=_A )
_lowercase = model[0].eval()
recursively_load_weights(_A ,_A ,not is_finetuned )
hf_wavavec.save_pretrained(_A )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
    args = parser.parse_args()
    is_finetuned = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
| 398
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'''configuration_reformer''': ['''REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ReformerConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_reformer'''] = ['''ReformerTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_reformer_fast'''] = ['''ReformerTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_reformer'''] = [
'''REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ReformerAttention''',
'''ReformerForMaskedLM''',
'''ReformerForQuestionAnswering''',
'''ReformerForSequenceClassification''',
'''ReformerLayer''',
'''ReformerModel''',
'''ReformerModelWithLMHead''',
'''ReformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 239
| 0
|
"""simple docstring"""
import sys
import turtle
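# Draws a Sierpinski triangle with turtle graphics: each triangle is recursively
# split into three smaller ones via the midpoints of its edges.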
def get_mid(pa: tuple[float, float], pb: tuple[float, float]) -> tuple[float, float]:
    return (pa[0] + pb[0]) / 2, (pa[1] + pb[1]) / 2
def triangle(vertexa: tuple[float, float], vertexb: tuple[float, float], vertexc: tuple[float, float], depth: int) -> None:
    my_pen.up()
    my_pen.goto(vertexa[0], vertexa[1])
    my_pen.down()
    my_pen.goto(vertexb[0], vertexb[1])
    my_pen.goto(vertexc[0], vertexc[1])
    my_pen.goto(vertexa[0], vertexa[1])
    if depth == 0:
        return
    triangle(vertexa, get_mid(vertexa, vertexb), get_mid(vertexa, vertexc), depth - 1)
    triangle(vertexb, get_mid(vertexa, vertexb), get_mid(vertexb, vertexc), depth - 1)
    triangle(vertexc, get_mid(vertexc, vertexb), get_mid(vertexa, vertexc), depth - 1)
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
'Correct format for using this script: '
'python fractals.py <int:depth_for_fractal>'
)
    my_pen = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor('red')
    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 720
|
"""simple docstring"""
from math import sqrt
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False
    # All primes > 3 are of the form 6k +/- 1
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution(nth: int = 10001) -> int:
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number
if __name__ == "__main__":
print(F"{solution() = }")
| 128
| 0
|
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
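# Each helper below builds a biquad (second-order section) IIRFilter; the
# coefficient formulas follow the standard audio EQ cookbook.
# Low-pass filter: attenuates frequencies above the cutoff `frequency`.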
def make_lowpass( frequency: int , samplerate: int , q_factor: float = 1 / sqrt(2 ) )-> IIRFilter:
_snake_case : List[Any] = tau * frequency / samplerate
_snake_case : Union[str, Any] = sin(lowerCAmelCase )
_snake_case : Optional[Any] = cos(lowerCAmelCase )
_snake_case : Union[str, Any] = _sin / (2 * q_factor)
_snake_case : List[str] = (1 - _cos) / 2
_snake_case : Tuple = 1 - _cos
_snake_case : Any = 1 + alpha
_snake_case : List[Any] = -2 * _cos
_snake_case : Optional[int] = 1 - alpha
_snake_case : Dict = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
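# High-pass filter: attenuates frequencies below the cutoff `frequency`.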
def make_highpass( frequency: int , samplerate: int , q_factor: float = 1 / sqrt(2 ) )-> IIRFilter:
_snake_case : Dict = tau * frequency / samplerate
_snake_case : int = sin(lowerCAmelCase )
_snake_case : Dict = cos(lowerCAmelCase )
_snake_case : Optional[Any] = _sin / (2 * q_factor)
_snake_case : Tuple = (1 + _cos) / 2
_snake_case : Union[str, Any] = -1 - _cos
_snake_case : Dict = 1 + alpha
_snake_case : int = -2 * _cos
_snake_case : Optional[Any] = 1 - alpha
_snake_case : List[str] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
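# Band-pass filter: passes a band centred on `frequency`, width set by `q_factor`.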
def make_bandpass( frequency: int , samplerate: int , q_factor: float = 1 / sqrt(2 ) )-> IIRFilter:
_snake_case : List[str] = tau * frequency / samplerate
_snake_case : List[str] = sin(lowerCAmelCase )
_snake_case : Union[str, Any] = cos(lowerCAmelCase )
_snake_case : str = _sin / (2 * q_factor)
_snake_case : Any = _sin / 2
_snake_case : Dict = 0
_snake_case : Dict = -ba
_snake_case : Dict = 1 + alpha
_snake_case : Optional[Any] = -2 * _cos
_snake_case : Tuple = 1 - alpha
_snake_case : Union[str, Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
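# All-pass filter: flat magnitude response with a frequency-dependent phase shift.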
def make_allpass( frequency: int , samplerate: int , q_factor: float = 1 / sqrt(2 ) )-> IIRFilter:
_snake_case : List[Any] = tau * frequency / samplerate
_snake_case : List[Any] = sin(lowerCAmelCase )
_snake_case : int = cos(lowerCAmelCase )
_snake_case : List[str] = _sin / (2 * q_factor)
_snake_case : List[Any] = 1 - alpha
_snake_case : Tuple = -2 * _cos
_snake_case : int = 1 + alpha
_snake_case : Dict = IIRFilter(2 )
filt.set_coefficients([ba, ba, ba] , [ba, ba, ba] )
return filt
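# Peak (bell) filter: boosts or cuts by `gain_db` decibels around `frequency`.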
def make_peak( frequency: int , samplerate: int , gain_db: float , q_factor: float = 1 / sqrt(2 ) , )-> IIRFilter:
_snake_case : Any = tau * frequency / samplerate
_snake_case : str = sin(lowerCAmelCase )
_snake_case : List[str] = cos(lowerCAmelCase )
_snake_case : Union[str, Any] = _sin / (2 * q_factor)
_snake_case : Optional[Any] = 10 ** (gain_db / 40)
_snake_case : Any = 1 + alpha * big_a
_snake_case : int = -2 * _cos
_snake_case : Any = 1 - alpha * big_a
_snake_case : List[str] = 1 + alpha / big_a
_snake_case : Any = -2 * _cos
_snake_case : int = 1 - alpha / big_a
_snake_case : List[Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
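# Low-shelf filter: boosts or cuts by `gain_db` decibels below the corner frequency.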
def make_lowshelf( frequency: int , samplerate: int , gain_db: float , q_factor: float = 1 / sqrt(2 ) , )-> IIRFilter:
_snake_case : Tuple = tau * frequency / samplerate
_snake_case : List[str] = sin(lowerCAmelCase )
_snake_case : Any = cos(lowerCAmelCase )
_snake_case : str = _sin / (2 * q_factor)
_snake_case : List[Any] = 10 ** (gain_db / 40)
_snake_case : str = (big_a + 1) - (big_a - 1) * _cos
_snake_case : Tuple = (big_a + 1) + (big_a - 1) * _cos
_snake_case : Optional[Any] = (big_a - 1) - (big_a + 1) * _cos
_snake_case : List[str] = (big_a - 1) + (big_a + 1) * _cos
_snake_case : Union[str, Any] = 2 * sqrt(lowerCAmelCase ) * alpha
_snake_case : Union[str, Any] = big_a * (pmc + aaa)
_snake_case : Union[str, Any] = 2 * big_a * mpc
_snake_case : List[Any] = big_a * (pmc - aaa)
_snake_case : List[Any] = ppmc + aaa
_snake_case : Any = -2 * pmpc
_snake_case : List[str] = ppmc - aaa
_snake_case : List[str] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
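# High-shelf filter: boosts or cuts by `gain_db` decibels above the corner frequency.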
def make_highshelf( frequency: int , samplerate: int , gain_db: float , q_factor: float = 1 / sqrt(2 ) , )-> IIRFilter:
_snake_case : Optional[int] = tau * frequency / samplerate
_snake_case : Union[str, Any] = sin(lowerCAmelCase )
_snake_case : Optional[Any] = cos(lowerCAmelCase )
_snake_case : List[Any] = _sin / (2 * q_factor)
_snake_case : Union[str, Any] = 10 ** (gain_db / 40)
_snake_case : List[Any] = (big_a + 1) - (big_a - 1) * _cos
_snake_case : List[Any] = (big_a + 1) + (big_a - 1) * _cos
_snake_case : int = (big_a - 1) - (big_a + 1) * _cos
_snake_case : int = (big_a - 1) + (big_a + 1) * _cos
_snake_case : List[Any] = 2 * sqrt(lowerCAmelCase ) * alpha
_snake_case : Tuple = big_a * (ppmc + aaa)
_snake_case : Union[str, Any] = -2 * big_a * pmpc
_snake_case : Union[str, Any] = big_a * (ppmc - aaa)
_snake_case : int = pmc + aaa
_snake_case : List[Any] = 2 * mpc
_snake_case : Tuple = pmc - aaa
_snake_case : Optional[int] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
| 411
|
import string
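# Atbash cipher: every letter is mapped to its mirror in the alphabet (A<->Z, B<->Y, ...),
# so applying the cipher twice returns the original text.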
def atbash_slow(sequence: str) -> str:
    output = ""
    for i in sequence:
        extract = ord(i)
        if 65 <= extract <= 90:
            output += chr(155 - extract)
        elif 97 <= extract <= 122:
            output += chr(219 - extract)
        else:
            output += i
    return output
def atbash(sequence: str) -> str:
    letters = string.ascii_letters
    letters_reversed = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
    return "".join(
        letters_reversed[letters.index(c)] if c in letters else c for c in sequence )
def benchmark() -> None:
    from timeit import timeit
    print('Running performance benchmarks...' )
    setup = 'from string import printable ; from __main__ import atbash, atbash_slow'
    print(F"""> atbash_slow(): {timeit('atbash_slow(printable)' , setup=setup )} seconds""" )
    print(F"""> atbash(): {timeit('atbash(printable)' , setup=setup )} seconds""" )
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(F"""{example} encrypted in atbash: {atbash(example)}""")
benchmark()
| 411
| 1
|
'''simple docstring'''
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester( unittest.TestCase ):
'''simple docstring'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , ) -> Tuple:
'''simple docstring'''
_lowercase : int = parent
_lowercase : str = batch_size
_lowercase : List[str] = seq_length
_lowercase : Dict = is_training
_lowercase : Optional[int] = use_attention_mask
_lowercase : List[Any] = use_token_type_ids
_lowercase : Union[str, Any] = use_labels
_lowercase : Dict = vocab_size
_lowercase : List[Any] = hidden_size
_lowercase : Any = num_hidden_layers
_lowercase : int = num_attention_heads
_lowercase : Optional[int] = intermediate_size
_lowercase : Any = hidden_act
_lowercase : List[str] = hidden_dropout_prob
_lowercase : Union[str, Any] = attention_probs_dropout_prob
_lowercase : Optional[int] = max_position_embeddings
_lowercase : int = type_vocab_size
_lowercase : Any = type_sequence_label_size
_lowercase : Any = initializer_range
_lowercase : str = num_choices
    def prepare_config_and_inputs( self ):
'''simple docstring'''
_lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowercase : int = None
if self.use_attention_mask:
_lowercase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
_lowercase : Any = None
if self.use_token_type_ids:
_lowercase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowercase : str = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common( self ):
'''simple docstring'''
_lowercase : Dict = self.prepare_config_and_inputs()
_lowercase , _lowercase , _lowercase , _lowercase : Union[str, Any] = config_and_inputs
_lowercase : Optional[int] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest( FlaxModelTesterMixin , unittest.TestCase ):
'''simple docstring'''
    test_head_masking = True
    all_model_classes = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp( self ):
'''simple docstring'''
_lowercase : Tuple = FlaxRoFormerModelTester(self )
@slow
    def test_models_from_pretrained( self ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
_lowercase : Optional[int] = model_class_name.from_pretrained('junnyu/roformer_chinese_small' , from_pt=UpperCamelCase_ )
_lowercase : str = model(np.ones((1, 1) ) )
self.assertIsNotNone(UpperCamelCase_ )
@require_flax
class FlaxRoFormerModelIntegrationTest( unittest.TestCase ):
'''simple docstring'''
@slow
    def test_inference_masked_lm( self ):
'''simple docstring'''
_lowercase : Dict = FlaxRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base' )
_lowercase : Any = jnp.array([[0, 1, 2, 3, 4, 5]] )
_lowercase : int = model(UpperCamelCase_ )[0]
_lowercase : Union[str, Any] = 5_0000
_lowercase : str = (1, 6, vocab_size)
self.assertEqual(output.shape , UpperCamelCase_ )
_lowercase : int = jnp.array(
[[[-0.12_05, -1.02_65, 0.29_22], [-1.51_34, 0.19_74, 0.15_19], [-5.01_35, -3.90_03, -0.84_04]]] )
self.assertTrue(jnp.allclose(output[:, :3, :3] , UpperCamelCase_ , atol=1E-4 ) )
| 4
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A : Optional[Any] =logging.get_logger(__name__)
_A : Optional[int] ={
'''microsoft/markuplm-base''': '''https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json''',
'''microsoft/markuplm-large''': '''https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json''',
}
class MarkupLMConfig( PretrainedConfig ):
'''simple docstring'''
A_ = """markuplm"""
    def __init__( self , vocab_size=3_0522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=0 , bos_token_id=0 , eos_token_id=2 , max_xpath_tag_unit_embeddings=256 , max_xpath_subs_unit_embeddings=1024 , tag_pad_id=216 , subs_pad_id=1001 , xpath_unit_hidden_size=32 , max_depth=50 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs , )
_lowercase : List[Any] = vocab_size
_lowercase : Union[str, Any] = hidden_size
_lowercase : Dict = num_hidden_layers
_lowercase : Optional[Any] = num_attention_heads
_lowercase : Dict = hidden_act
_lowercase : Optional[Any] = intermediate_size
_lowercase : Optional[int] = hidden_dropout_prob
_lowercase : Union[str, Any] = attention_probs_dropout_prob
_lowercase : Any = max_position_embeddings
_lowercase : List[Any] = type_vocab_size
_lowercase : Union[str, Any] = initializer_range
_lowercase : Optional[int] = layer_norm_eps
_lowercase : Optional[Any] = position_embedding_type
_lowercase : str = use_cache
_lowercase : str = classifier_dropout
# additional properties
_lowercase : int = max_depth
_lowercase : Dict = max_xpath_tag_unit_embeddings
_lowercase : str = max_xpath_subs_unit_embeddings
_lowercase : List[str] = tag_pad_id
_lowercase : Optional[int] = subs_pad_id
_lowercase : Any = xpath_unit_hidden_size
| 4
| 1
|
'''simple docstring'''
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class TokenizerUtilTester(unittest.TestCase ):
'''simple docstring'''
    def test_cached_files_are_used_when_internet_is_down( self ):
# A mock response for an HTTP head request to emulate server down
_UpperCAmelCase : Tuple = mock.Mock()
_UpperCAmelCase : str = 500
_UpperCAmelCase : Union[str, Any] = {}
_UpperCAmelCase : Dict = HTTPError
_UpperCAmelCase : Any = {}
# Download this model to make sure it's in the cache.
_UpperCAmelCase : Optional[Any] = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("requests.Session.request" , return_value=A ) as mock_head:
_UpperCAmelCase : List[str] = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
        # This checks that we did call the fake head request
mock_head.assert_called()
@require_tokenizers
    def test_cached_files_are_used_when_internet_is_down_missing_files( self ):
# A mock response for an HTTP head request to emulate server down
_UpperCAmelCase : Optional[Any] = mock.Mock()
_UpperCAmelCase : List[str] = 500
_UpperCAmelCase : Any = {}
_UpperCAmelCase : Any = HTTPError
_UpperCAmelCase : List[Any] = {}
# Download this model to make sure it's in the cache.
_UpperCAmelCase : Optional[Any] = GPTaTokenizerFast.from_pretrained("gpt2" )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("requests.Session.request" , return_value=A ) as mock_head:
_UpperCAmelCase : List[str] = GPTaTokenizerFast.from_pretrained("gpt2" )
        # This checks that we did call the fake head request
mock_head.assert_called()
    def test_legacy_load_from_one_file( self ):
# This test is for deprecated behavior and can be removed in v5
try:
_UpperCAmelCase : List[Any] = tempfile.mktemp()
with open(A , "wb" ) as f:
http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" , A )
_UpperCAmelCase : Tuple = AlbertTokenizer.from_pretrained(A )
finally:
os.remove(A )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile("tokenizer.json" ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open("tokenizer.json" , "wb" ) as f:
http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json" , A )
_UpperCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 has a vocab size of 1000
self.assertEqual(tokenizer.vocab_size , 1000 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove("tokenizer.json" )
    def test_legacy_load_from_url( self ):
# This test is for deprecated behavior and can be removed in v5
_UpperCAmelCase : Tuple = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" )
@is_staging_test
class TokenizerPushToHubTester(unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase: Union[str, Any] = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
    def setUpClass( cls ):
        cls._token = TOKEN
HfFolder.save_token(A )
@classmethod
    def tearDownClass( cls ):
try:
delete_repo(token=cls._token , repo_id="test-tokenizer" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-tokenizer-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-tokenizer" )
except HTTPError:
pass
    def test_push_to_hub( self ):
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCAmelCase : Optional[int] = os.path.join(A , "vocab.txt" )
with open(A , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
_UpperCAmelCase : Any = BertTokenizer(A )
tokenizer.push_to_hub("test-tokenizer" , use_auth_token=self._token )
_UpperCAmelCase : List[Any] = BertTokenizer.from_pretrained(F"""{USER}/test-tokenizer""" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id="test-tokenizer" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(A , repo_id="test-tokenizer" , push_to_hub=A , use_auth_token=self._token )
_UpperCAmelCase : Any = BertTokenizer.from_pretrained(F"""{USER}/test-tokenizer""" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
    def test_push_to_hub_in_organization( self ):
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCAmelCase : str = os.path.join(A , "vocab.txt" )
with open(A , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
_UpperCAmelCase : Union[str, Any] = BertTokenizer(A )
tokenizer.push_to_hub("valid_org/test-tokenizer-org" , use_auth_token=self._token )
_UpperCAmelCase : str = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-tokenizer-org" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
A , repo_id="valid_org/test-tokenizer-org" , push_to_hub=A , use_auth_token=self._token )
_UpperCAmelCase : Union[str, Any] = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
@require_tokenizers
    def test_push_to_hub_dynamic_tokenizer( self ):
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCAmelCase : Dict = os.path.join(A , "vocab.txt" )
with open(A , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
_UpperCAmelCase : Optional[Any] = CustomTokenizer(A )
# No fast custom tokenizer
tokenizer.push_to_hub("test-dynamic-tokenizer" , use_auth_token=self._token )
_UpperCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained(F"""{USER}/test-dynamic-tokenizer""" , trust_remote_code=A )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , "CustomTokenizer" )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCAmelCase : List[Any] = os.path.join(A , "vocab.txt" )
with open(A , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
_UpperCAmelCase : List[Any] = BertTokenizerFast.from_pretrained(A )
bert_tokenizer.save_pretrained(A )
_UpperCAmelCase : int = CustomTokenizerFast.from_pretrained(A )
tokenizer.push_to_hub("test-dynamic-tokenizer" , use_auth_token=self._token )
_UpperCAmelCase : int = AutoTokenizer.from_pretrained(F"""{USER}/test-dynamic-tokenizer""" , trust_remote_code=A )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , "CustomTokenizerFast" )
_UpperCAmelCase : Tuple = AutoTokenizer.from_pretrained(
F"""{USER}/test-dynamic-tokenizer""" , use_fast=A , trust_remote_code=A )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , "CustomTokenizer" )
class TrieTest(unittest.TestCase ):
'''simple docstring'''
    def test_trie( self ):
_UpperCAmelCase : Optional[Any] = Trie()
trie.add("Hello 友達" )
self.assertEqual(trie.data , {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}} )
trie.add("Hello" )
trie.data
self.assertEqual(trie.data , {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}} )
    def test_trie_split( self ):
_UpperCAmelCase : int = Trie()
self.assertEqual(trie.split("[CLS] This is a extra_id_100" ) , ["[CLS] This is a extra_id_100"] )
trie.add("[CLS]" )
trie.add("extra_id_1" )
trie.add("extra_id_100" )
self.assertEqual(trie.split("[CLS] This is a extra_id_100" ) , ["[CLS]", " This is a ", "extra_id_100"] )
    def test_trie_single( self ):
_UpperCAmelCase : Any = Trie()
trie.add("A" )
self.assertEqual(trie.split("ABC" ) , ["A", "BC"] )
self.assertEqual(trie.split("BCA" ) , ["BC", "A"] )
    def test_trie_final( self ):
_UpperCAmelCase : Any = Trie()
trie.add("TOKEN]" )
trie.add("[SPECIAL_TOKEN]" )
self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ) , ["This is something ", "[SPECIAL_TOKEN]"] )
    def test_trie_subtokens( self ):
_UpperCAmelCase : str = Trie()
trie.add("A" )
trie.add("P" )
trie.add("[SPECIAL_TOKEN]" )
self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ) , ["This is something ", "[SPECIAL_TOKEN]"] )
    def test_trie_suffix_tokens( self ):
_UpperCAmelCase : str = Trie()
trie.add("AB" )
trie.add("B" )
trie.add("C" )
self.assertEqual(trie.split("ABC" ) , ["AB", "C"] )
    def test_trie_skip( self ):
_UpperCAmelCase : List[str] = Trie()
trie.add("ABC" )
trie.add("B" )
trie.add("CD" )
self.assertEqual(trie.split("ABCD" ) , ["ABC", "D"] )
    def test_cut_text_hardening( self ):
# Even if the offsets are wrong, we necessarily output correct string
# parts.
_UpperCAmelCase : int = Trie()
_UpperCAmelCase : List[str] = trie.cut_text("ABC" , [0, 0, 2, 1, 2, 3] )
self.assertEqual(A , ["AB", "C"] )
| 244
|
'''simple docstring'''
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
__SCREAMING_SNAKE_CASE : Optional[int] = """platform"""
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
'''simple docstring'''
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=20 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ):
_UpperCAmelCase : List[Any] = parent
_UpperCAmelCase : Union[str, Any] = batch_size
_UpperCAmelCase : Optional[int] = seq_length
_UpperCAmelCase : Optional[Any] = is_training
_UpperCAmelCase : Optional[int] = use_labels
_UpperCAmelCase : Union[str, Any] = vocab_size
_UpperCAmelCase : Optional[int] = hidden_size
_UpperCAmelCase : int = num_hidden_layers
_UpperCAmelCase : Any = num_attention_heads
_UpperCAmelCase : List[str] = intermediate_size
_UpperCAmelCase : Dict = hidden_dropout_prob
_UpperCAmelCase : Optional[Any] = attention_probs_dropout_prob
_UpperCAmelCase : Tuple = max_position_embeddings
_UpperCAmelCase : List[str] = eos_token_id
_UpperCAmelCase : Dict = pad_token_id
_UpperCAmelCase : int = bos_token_id
    def prepare_config_and_inputs_for_common( self ):
_UpperCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
_UpperCAmelCase : Tuple = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
_UpperCAmelCase : Dict = np.concatenate([input_ids, eos_tensor] , axis=1 )
_UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase : List[str] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
_UpperCAmelCase : List[str] = prepare_pegasus_inputs_dict(A , A , A )
return config, inputs_dict
    def check_use_cache_forward( self , model_class_name , config , inputs_dict ):
_UpperCAmelCase : Optional[int] = 20
_UpperCAmelCase : Optional[Any] = model_class_name(A )
_UpperCAmelCase : str = model.encode(inputs_dict["input_ids"] )
_UpperCAmelCase , _UpperCAmelCase : Any = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
_UpperCAmelCase : Union[str, Any] = model.init_cache(decoder_input_ids.shape[0] , A , A )
_UpperCAmelCase : Tuple = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="i4" )
_UpperCAmelCase : Dict = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_UpperCAmelCase : Optional[Any] = model.decode(
decoder_input_ids[:, :-1] , A , decoder_attention_mask=A , past_key_values=A , decoder_position_ids=A , )
_UpperCAmelCase : Dict = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
_UpperCAmelCase : List[Any] = model.decode(
decoder_input_ids[:, -1:] , A , decoder_attention_mask=A , past_key_values=outputs_cache.past_key_values , decoder_position_ids=A , )
_UpperCAmelCase : Optional[Any] = model.decode(A , A )
_UpperCAmelCase : Tuple = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" )
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )
        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)
        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
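# The two cache checks above decode the same sequence twice - once token by token
# through init_cache/past_key_values and once in a single forward pass - and assert
# that the final-step logits agree to within 1e-3. That agreement is what validates
# the incremental-decoding path used at generation time.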
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
):
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = FlaxPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)
    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/pegasus-large", from_pt=True)
            input_ids = np.ones((1, 1))
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
    @slow
    def test_pegasus_xsum_summary(self):
        model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
        tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
        src_text = [
" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
]
        tgt_text = [
"California's largest electricity provider has turned off power to hundreds of thousands of customers.",
"Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.",
]
        inputs = tokenizer(src_text, return_tensors="np", truncation=True, max_length=512, padding=True)
        translated_tokens = model.generate(**inputs, num_beams=2).sequences
        decoded = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
        assert tgt_text == decoded
| 244
| 1
|
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class Test(unittest.TestCase):
    def test_component(self):
        x = Vector([1, 2, 3])
        self.assertEqual(x.component(0), 1)
        self.assertEqual(x.component(2), 3)
        _ = Vector()
    def test_str(self):
        x = Vector([0, 0, 0, 0, 0, 1])
        self.assertEqual(str(x), "(0,0,0,0,0,1)")
    def test_size(self):
        x = Vector([1, 2, 3, 4])
        self.assertEqual(len(x), 4)
    def test_euclidean_length(self):
        x = Vector([1, 2])
        y = Vector([1, 2, 3, 4, 5])
        z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
        w = Vector([1, -1, 1, -1, 2, -3, 4, -5])
        self.assertAlmostEqual(x.euclidean_length(), 2.236, 3)
        self.assertAlmostEqual(y.euclidean_length(), 7.416, 3)
        self.assertEqual(z.euclidean_length(), 0)
        self.assertAlmostEqual(w.euclidean_length(), 7.616, 3)
    def test_add(self):
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x + y).component(0), 2)
        self.assertEqual((x + y).component(1), 3)
        self.assertEqual((x + y).component(2), 4)
    def test_sub(self):
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x - y).component(0), 0)
        self.assertEqual((x - y).component(1), 1)
        self.assertEqual((x - y).component(2), 2)
    def test_mul(self):
        x = Vector([1, 2, 3])
        a = Vector([2, -1, 4])  # for test of dot product
        b = Vector([1, -2, -1])
        self.assertEqual(str(x * 3.0), "(3.0,6.0,9.0)")
        self.assertEqual((a * b), 0)
    def test_zero_vector(self):
        self.assertEqual(str(zero_vector(10)).count("0"), 10)
    def test_unit_basis_vector(self):
        self.assertEqual(str(unit_basis_vector(3, 1)), "(0,1,0)")
    def test_axpy(self):
        x = Vector([1, 2, 3])
        y = Vector([1, 0, 1])
        self.assertEqual(str(axpy(2, x, y)), "(3,4,7)")
    def test_copy(self):
        x = Vector([1, 0, 0, 0, 0, 0])
        y = x.copy()
        self.assertEqual(str(x), str(y))
    def test_change_component(self):
        x = Vector([1, 0, 0])
        x.change_component(0, 0)
        x.change_component(1, 1)
        self.assertEqual(str(x), "(0,1,0)")
    def test_str_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n", str(a))
    def test_minor(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(minors[x][y], a.minor(x, y))
    def test_cofactor(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(cofactors[x][y], a.cofactor(x, y))
    def test_determinant(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(-5, a.determinant())
    def test_mul_matrix(self):
        a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
        x = Vector([1, 2, 3])
        self.assertEqual("(14,32,50)", str(a * x))
        self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n", str(a * 2))
    def test_change_component_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        a.change_component(0, 2, 5)
        self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n", str(a))
    def test_component_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(7, a.component(2, 1), 0.01)
    def test_add_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n", str(a + b))
    def test_sub_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n", str(a - b))
    def test_square_zero_matrix(self):
        self.assertEqual(
            "|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n", str(square_zero_matrix(5))
        )
if __name__ == "__main__":
unittest.main()
| 720
|
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __lowercase(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "attention_mask", "pixel_values"]
| 199
| 0
|
UNIVERSAL_GAS_CONSTANT = 8.3144598


def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    if temperature < 0:
        raise Exception("Temperature cannot be less than 0 K")
    if molar_mass <= 0:
        raise Exception("Molar mass cannot be less than or equal to 0 kg/mol")
    else:
        return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()
    # example
    temperature = 300
    molar_mass = 28
    vrms = rms_speed_of_molecule(temperature, molar_mass)
    print(f"Vrms of Nitrogen gas at 300 K is {vrms} m/s")
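    # Sanity check of the formula (hand-computed, approximate):
    # sqrt(3 * 8.3144598 * 300 / 28) is about 16.348, so the printed value
    # should land within a millimetre per second of that.
    assert abs(vrms - 16.348) < 1e-3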
| 243
|
import tensorflow as tf
from ...tf_utils import shape_list
class TFAdaptiveSoftmaxMask(tf.keras.layers.Layer):
    def __init__(self, vocab_size, d_embed, d_proj, cutoffs, div_val=1, keep_order=False, **kwargs):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.d_embed = d_embed
        self.d_proj = d_proj
        self.cutoffs = cutoffs + [vocab_size]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val
        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        self.keep_order = keep_order
        self.out_layers = []
        self.out_projs = []
    def build(self, input_shape):
        if self.n_clusters > 0:
            self.cluster_weight = self.add_weight(
                shape=(self.n_clusters, self.d_embed), initializer="zeros", trainable=True, name="cluster_weight"
            )
            self.cluster_bias = self.add_weight(
                shape=(self.n_clusters,), initializer="zeros", trainable=True, name="cluster_bias"
            )
        if self.div_val == 1:
            for i in range(len(self.cutoffs)):
                if self.d_proj != self.d_embed:
                    weight = self.add_weight(
                        shape=(self.d_embed, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}"
                    )
                    self.out_projs.append(weight)
                else:
                    self.out_projs.append(None)
                weight = self.add_weight(
                    shape=(self.vocab_size, self.d_embed), initializer="zeros", trainable=True,
                    name=f"out_layers_._{i}_._weight",
                )
                bias = self.add_weight(
                    shape=(self.vocab_size,), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._bias"
                )
                self.out_layers.append((weight, bias))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = self.d_embed // (self.div_val**i)
                proj = self.add_weight(
                    shape=(d_emb_i, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}"
                )
                self.out_projs.append(proj)
                weight = self.add_weight(
                    shape=(r_idx - l_idx, d_emb_i), initializer="zeros", trainable=True,
                    name=f"out_layers_._{i}_._weight",
                )
                bias = self.add_weight(
                    shape=(r_idx - l_idx,), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._bias"
                )
                self.out_layers.append((weight, bias))
        super().build(input_shape)
    @staticmethod
    def _logit(x, W, b, proj=None):
        y = x
        if proj is not None:
            y = tf.einsum("ibd,ed->ibe", y, proj)
        return tf.einsum("ibd,nd->ibn", y, W) + b
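    # Shape note for `_logit` above: "ibd,ed->ibe" maps hidden states of shape
    # [seq, batch, d_proj] back to the embedding space via `proj` [d_embed, d_proj],
    # and "ibd,nd->ibn" then scores every token in the weight slice, giving logits
    # of shape [seq, batch, n_tokens].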
    @staticmethod
    def _gather_logprob(logprob, target):
        lp_size = shape_list(logprob)
        r = tf.range(lp_size[0], dtype=target.dtype)
        idx = tf.stack([r, target], 1)
        return tf.gather_nd(logprob, idx)
    def call(self, hidden, target, return_mean=True, training=False):
        head_logprob = 0
        if self.n_clusters == 0:
            output = self._logit(hidden, self.out_layers[0][0], self.out_layers[0][1], self.out_projs[0])
            if target is not None:
                loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output)
            out = tf.nn.log_softmax(output, axis=-1)
        else:
            hidden_sizes = shape_list(hidden)
            out = []
            loss = tf.zeros(hidden_sizes[:2])
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                if target is not None:
                    mask = (target >= l_idx) & (target < r_idx)
                    mask_idx = tf.where(mask)
                    cur_target = tf.boolean_mask(target, mask) - l_idx
                if self.div_val == 1:
                    cur_W = self.out_layers[0][0][l_idx:r_idx]
                    cur_b = self.out_layers[0][1][l_idx:r_idx]
                else:
                    cur_W = self.out_layers[i][0]
                    cur_b = self.out_layers[i][1]
                if i == 0:
                    cur_W = tf.concat([cur_W, self.cluster_weight], 0)
                    cur_b = tf.concat([cur_b, self.cluster_bias], 0)
                    head_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[0])
                    head_logprob = tf.nn.log_softmax(head_logit)
                    out.append(head_logprob[..., : self.cutoffs[0]])
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_head_logprob, cur_target)
                else:
                    tail_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[i])
                    tail_logprob = tf.nn.log_softmax(tail_logit)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    logprob_i = head_logprob[..., cluster_prob_idx, None] + tail_logprob
                    out.append(logprob_i)
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_tail_logprob = tf.boolean_mask(tail_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_tail_logprob, cur_target)
                        cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
                if target is not None:
                    loss += tf.scatter_nd(mask_idx, -cur_logprob, shape_list(loss))
            out = tf.concat(out, axis=-1)
        if target is not None:
            if return_mean:
                loss = tf.reduce_mean(loss)
            # Add the training-time loss value to the layer using `self.add_loss()`.
            self.add_loss(loss)
            # Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference).
            self.add_metric(loss, name=self.name, aggregation="mean" if return_mean else "")
        return out
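# A tiny standalone numpy sketch of the `_gather_logprob` trick used above: for a
# [batch, vocab] matrix of log-probs, pick row i's entry at column target[i],
# which is exactly what tf.gather_nd does with the stacked [row, target] pairs.
if __name__ == "__main__":
    import numpy as np

    logprob = np.log([[0.2, 0.8], [0.6, 0.4]])
    target = np.array([1, 0])
    print(logprob[np.arange(len(target)), target])  # [log(0.8), log(0.6)]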
| 243
| 1
|
"""simple docstring"""
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
logger = logging.get_logger(__name__)
class ParallelBackendConfig:
    backend_name = None
@experimental
def parallel_map(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    if ParallelBackendConfig.backend_name is None:
        return _map_with_multiprocessing_pool(
            function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func
        )
    return _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func)
def _map_with_multiprocessing_pool(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    num_proc = num_proc if num_proc <= len(iterable) else len(iterable)
    split_kwds = []  # We organize the splits ourselve (contiguous splits)
    for index in range(num_proc):
        div = len(iterable) // num_proc
        mod = len(iterable) % num_proc
        start = div * index + min(index, mod)
        end = start + div + (1 if index < mod else 0)
        split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc))

    if len(iterable) != sum(len(i[1]) for i in split_kwds):
        raise ValueError(
            f"Error dividing inputs iterable among processes. "
            f"Total number of objects {len(iterable)}, "
            f"length: {sum(len(i[1]) for i in split_kwds)}"
        )

    logger.info(
        f"Spawning {num_proc} processes for {len(iterable)} objects in slices of {[len(i[1]) for i in split_kwds]}"
    )
    initargs, initializer = None, None
    if not disable_tqdm:
        initargs, initializer = (RLock(),), tqdm.set_lock
    with Pool(num_proc, initargs=initargs, initializer=initializer) as pool:
        mapped = pool.map(single_map_nested_func, split_kwds)
    logger.info(f"Finished {num_proc} processes")
    mapped = [obj for proc_res in mapped for obj in proc_res]
    logger.info(f"Unpacked {len(mapped)} objects")
    return mapped
def _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    import joblib

    with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=num_proc):
        return joblib.Parallel()(
            joblib.delayed(single_map_nested_func)((function, obj, types, None, True, None)) for obj in iterable
        )
@experimental
@contextlib.contextmanager
def parallel_backend(backend_name):
    ParallelBackendConfig.backend_name = backend_name
    if backend_name == "spark":
        from joblibspark import register_spark

        register_spark()
    # TODO: call create_cache_and_write_probe if "download" in steps
    # TODO: raise NotImplementedError when Dataset.map etc is called
    try:
        yield
    finally:
        ParallelBackendConfig.backend_name = None
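# A minimal standalone sketch of the contiguous split used above:
# `div * index + min(index, mod)` spreads the remainder over the first `mod`
# chunks, so chunk lengths differ by at most one.
if __name__ == "__main__":
    items, num_proc = list(range(10)), 3
    div, mod = len(items) // num_proc, len(items) % num_proc
    chunks = [items[div * i + min(i, mod) : div * (i + 1) + min(i + 1, mod)] for i in range(num_proc)]
    print(chunks)  # [[0, 1, 2, 3], [4, 5, 6], [7, 8, 9]]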
| 703
|
"""simple docstring"""
from __future__ import annotations
from math import gcd
def pollard_rho(num: int, seed: int = 2, step: int = 1, attempts: int = 3) -> int | None:
'''simple docstring'''
if num < 2:
raise ValueError('The input value cannot be less than 2' )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        return (pow(value, 2) + step) % modulus
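    # Worked example: with num = 91 and step = 1 the pseudorandom sequence from
    # seed = 2 is rand_fn(2, 1, 91) == 5, rand_fn(5, 1, 91) == 26,
    # rand_fn(26, 1, 91) == 40, ...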
    for _ in range(attempts):
        # These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed

        while True:
            # At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise, num)
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
# This is actually what Richard Brent's the "optimized" variant does.
        seed = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
if __name__ == "__main__":
import argparse
    parser = argparse.ArgumentParser()
parser.add_argument(
"num",
type=int,
help="The value to find a divisor of",
)
parser.add_argument(
"--attempts",
type=int,
default=3,
help="The number of attempts before giving up",
)
    args = parser.parse_args()
    divisor = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(f"""{args.num} is probably prime""")
else:
        quotient = args.num // divisor
print(f"""{args.num} = {divisor} * {quotient}""")
| 659
| 0
|
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False
    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size
# prior components
torch.manual_seed(0 )
        prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
torch.manual_seed(0 )
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size,
                projection_dim=embedder_projection_dim, intermediate_size=37, layer_norm_eps=1e-05,
                num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000,
            )
        )
torch.manual_seed(0 )
        prior = PriorTransformer(
            num_attention_heads=2, attention_head_dim=12, embedding_dim=embedder_projection_dim, num_layers=1,
        )
torch.manual_seed(0 )
        prior_scheduler = DDPMScheduler(
            variance_type="fixed_small_log", prediction_type="sample", num_train_timesteps=1_000, clip_sample=True,
            clip_sample_range=5.0, beta_schedule="squaredcos_cap_v2",
        )
# regular denoising components
torch.manual_seed(0 )
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")
torch.manual_seed(0 )
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
torch.manual_seed(0 )
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32,
                intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5,
                pad_token_id=1, vocab_size=1_000,
            )
        )
torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            sample_size=32, in_channels=4, out_channels=4,
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), block_out_channels=(32, 64),
            attention_head_dim=(2, 4), class_embed_type="projection",
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size, layers_per_block=1, upcast_attention=True,
            use_linear_projection=True,
        )
torch.manual_seed(0 )
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear", beta_start=0.00_085, beta_end=0.012, prediction_type="v_prediction",
            set_alpha_to_one=False, steps_offset=1,
        )
torch.manual_seed(0 )
        vae = AutoencoderKL()

        components = {
# prior components
"""prior_tokenizer""": prior_tokenizer,
"""prior_text_encoder""": prior_text_encoder,
"""prior""": prior,
"""prior_scheduler""": prior_scheduler,
# image noising components
"""image_normalizer""": image_normalizer,
"""image_noising_scheduler""": image_noising_scheduler,
# regular denoising components
"""tokenizer""": tokenizer,
"""text_encoder""": text_encoder,
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""prior_num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)
@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_unclip(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy"
        )
        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe("anime turle", generator=generator, output_type="np")
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        _ = pipe(
            "anime turtle", prior_num_inference_steps=2, num_inference_steps=2, output_type="np",
        )
        mem_bytes = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 162
|
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
A_ : List[Any] = logging.get_logger(__name__)
A_ : Any = {
'EleutherAI/gpt-neo-1.3B': 'https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json',
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class GPTNeoConfig(PretrainedConfig):
    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
    def __init__(self, vocab_size=50257, max_position_embeddings=2048, hidden_size=2048, num_layers=24,
                 attention_types=[[["global", "local"], 12]], num_heads=16, intermediate_size=None, window_size=256,
                 activation_function="gelu_new", resid_dropout=0.0, embed_dropout=0.0, attention_dropout=0.0,
                 classifier_dropout=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True,
                 bos_token_id=50256, eos_token_id=50256, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)
if len(self.attention_layers ) != self.num_layers:
raise ValueError(
"""Configuration for convolutional module is incorrect. """
"""It is required that `len(config.attention_layers)` == `config.num_layers` """
F"""but is `len(config.attention_layers) = {len(self.attention_layers )}`, """
F"""`config.num_layers = {self.num_layers}`. """
"""`config.attention_layers` is prepared using `config.attention_types`. """
"""Please verify the value of `config.attention_types` argument.""" )
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
    @staticmethod
    def expand_attention_types_params(attention_types):
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions
def custom_unfold(input, dimension, size, step):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX."""
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]

    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]

    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]

    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))
    return sliced.permute(perm)
def custom_get_block_length_and_num_blocks(seq_length, window_size):
    """Returns the largest divisor of `seq_length` below `window_size` and the matching number of blocks."""
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor")
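# Worked example for the helper above: custom_get_block_length_and_num_blocks(8, 4)
# scans candidates torch.arange(1, 4) == [1, 2, 3], keeps the divisors of 8 among
# them ([1, 2]) and returns block_length == 2 with num_blocks == 4.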
class GPTNeoOnnxConfig(OnnxConfigWithPast):
    @property
    def inputs(self):
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs
    @property
    def num_attention_heads(self):
        return self._config.num_heads
    def generate_dummy_inputs(self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None):
        common_inputs = super(GPTNeoOnnxConfig, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs
    @property
    def default_onnx_opset(self):
        return 13
| 456
| 0
|
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_CITATION = '\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n'
_DESCRIPTION = '\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n'
_KWARGS_DESCRIPTION = '\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n \'google_bleu\': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results["google_bleu"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results["google_bleu"], 2))\n 0.4\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , )
    def _compute(
        self,
        predictions: List[List[str]],
        references: List[List[List[str]]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
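# A minimal standalone sketch (no assumptions beyond nltk itself) of what the
# metric above delegates to: GLEU is min(n-gram precision, n-gram recall) over
# all 1- to 4-grams, aggregated across the corpus.
if __name__ == "__main__":
    hypothesis = "the cat sat on the mat".split()
    reference = "the cat is on the mat".split()
    # sentence_gleu expects a list of references per hypothesis
    print(gleu_score.sentence_gleu([reference], hypothesis))  # a value in [0, 1]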
| 715
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
'facebook/timesformer': 'https://huggingface.co/facebook/timesformer/resolve/main/config.json',
}
class TimesformerConfig(PretrainedConfig):
    model_type = "timesformer"

    def __init__(self, image_size=224, patch_size=16, num_channels=3, num_frames=8, hidden_size=768,
                 num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu",
                 hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02,
                 layer_norm_eps=1e-6, qkv_bias=True, attention_type="divided_space_time", drop_path_rate=0,
                 **kwargs):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
| 467
| 0
|
'''simple docstring'''
import numpy as np
import datasets
_DESCRIPTION = '''
Compute the Mahalanobis Distance
Mahalonobis distance is the distance between a point and a distribution.
And not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.
It was introduced by Prof. P. C. Mahalanobis in 1936
and has been used in various statistical applications ever since
[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]
'''
_CITATION = '''\
@article{de2000mahalanobis,
title={The mahalanobis distance},
author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},
journal={Chemometrics and intelligent laboratory systems},
volume={50},
number={1},
pages={1--18},
year={2000},
publisher={Elsevier}
}
'''
_KWARGS_DESCRIPTION = '''
Args:
X: List of datapoints to be compared with the `reference_distribution`.
reference_distribution: List of datapoints from the reference distribution we want to compare to.
Returns:
mahalanobis: The Mahalonobis distance for each datapoint in `X`.
Examples:
>>> mahalanobis_metric = datasets.load_metric("mahalanobis")
>>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])
>>> print(results)
{\'mahalanobis\': array([0.5])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mahalanobis(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
"""X""": datasets.Sequence(datasets.Value("""float""" ,id="""sequence""" ) ,id="""X""" ),
} ) ,)
    def _compute(self, X, reference_distribution):
        # convert inputs to numpy arrays
        X = np.array(X)
        reference_distribution = np.array(reference_distribution)
        # Assert that arrays are 2D
        if len(X.shape) != 2:
            raise ValueError("Expected `X` to be a 2D vector")
        if len(reference_distribution.shape) != 2:
            raise ValueError("Expected `reference_distribution` to be a 2D vector")
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension"
            )
        # Get mahalanobis distance for each prediction
        X_minus_mu = X - np.mean(reference_distribution)
        cov = np.cov(reference_distribution.T)
        try:
            inv_covmat = np.linalg.inv(cov)
        except np.linalg.LinAlgError:
            inv_covmat = np.linalg.pinv(cov)
        left_term = np.dot(X_minus_mu, inv_covmat)
        mahal_dist = np.dot(left_term, X_minus_mu.T).diagonal()
        return {"mahalanobis": mahal_dist}
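# A plain-numpy sketch reproducing the docstring example above, following the same
# steps as `_compute` (scalar mean over the reference, pseudo-inverse fallback for
# a singular covariance):
if __name__ == "__main__":
    X = np.array([[0, 1]])
    reference = np.array([[0, 1], [1, 0]])
    delta = X - np.mean(reference)
    inv_cov = np.linalg.pinv(np.cov(reference.T))  # covariance is singular here, so pinv
    print(np.dot(np.dot(delta, inv_cov), delta.T).diagonal())  # [0.5], matching the example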
| 334
|
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
                 use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2,
                 num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
                 type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range, return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, "use_cache"):
            config.use_cache = True

        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )

                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )

                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
                )
    @slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 8
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
'''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
'''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
'''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
'''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}
class RwkvConfig(PretrainedConfig):
    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(
        self,
        vocab_size=50277,
        context_length=1024,
        hidden_size=4096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1e-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
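

# Hedged usage sketch (illustrative sizes, not a released checkpoint); the
# derived defaults above fill in attention_hidden_size and intermediate_size:
#
#     config = RwkvConfig(vocab_size=1000, hidden_size=256, num_hidden_layers=4)
#     assert config.attention_hidden_size == 256   # falls back to hidden_size
#     assert config.intermediate_size == 4 * 256   # falls back to 4 * hidden_size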
| 102
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [f"This example is {label}" for label in labels],
            return_tensors="pt",
            padding="max_length",
        )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
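

# Hedged usage sketch: pipeline tools are callable once constructed, and the
# first call downloads the default checkpoint, so this is illustrative only:
#
#     classifier = TextClassificationTool()
#     classifier("This movie was really great.", labels=["positive", "negative"])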
| 102
| 1
|
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class DatasetListTest(TestCase):
    def _create_example_records(self):
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _create_example_dict(self):
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data)

    def test_create(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ["col_1", "col_2"])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])

    def test_list_dict_equivalent(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)

    def test_uneven_records(self):  # checks what happens with missing columns
        uneven_records = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(uneven_records)
        self.assertDictEqual(dset[0], {"col_1": 1})
        self.assertDictEqual(dset[1], {"col_1": None})  # NB: first record is used for columns

    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        list_records = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(list_records)
        self.assertEqual(dset.info.features["col_1"], Sequence(Value("int64")))

    def test_create_empty(self):
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
| 250
|
from __future__ import annotations
def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    """Compare array[index1] and array[index2] and swap them when they violate
    `direction` (1 sorts ascending, 0 sorts descending)."""
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]


def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    """Recursively merge a bitonic sequence of `length` elements starting at `low`."""
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    """Sort `length` elements of `array` starting at `low`; `length` must be a power of two."""
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]

    bitonic_sort(unsorted, 0, len(unsorted), 1)
    print("\nSorted array in ascending order is: ", end="")
    print(*unsorted, sep=", ")

    bitonic_merge(unsorted, 0, len(unsorted), 0)
    print("Sorted array in descending order is: ", end="")
    print(*unsorted, sep=", ")
| 250
| 1
|
from math import ceil, sqrt
def solution(limit: int = 1_000_000) -> int:
    """Count the square laminae that can be formed using up to `limit` tiles."""
    answer = 0
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        # The hole must have the same parity as the outer square.
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1

        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1

    return answer
if __name__ == "__main__":
print(F'''{solution() = }''')
| 129
|
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPT2Config, GPT2LMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class PrefixLMTextDecoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]

    @register_to_config
    def __init__(
        self,
        prefix_length: int,
        prefix_inner_dim: int,
        prefix_hidden_dim: Optional[int] = None,
        vocab_size: int = 50_257,
        n_positions: int = 1_024,
        n_embd: int = 768,
        n_layer: int = 12,
        n_head: int = 12,
        n_inner: Optional[int] = None,
        activation_function: str = "gelu_new",
        resid_pdrop: float = 0.1,
        embd_pdrop: float = 0.1,
        attn_pdrop: float = 0.1,
        layer_norm_epsilon: float = 1e-5,
        initializer_range: float = 0.02,
        scale_attn_weights: bool = True,
        use_cache: bool = True,
        scale_attn_by_inverse_layer_idx: bool = False,
        reorder_and_upcast_attn: bool = False,
    ):
        super().__init__()

        self.prefix_length = prefix_length

        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and"
                f" `n_embd`: {n_embd} are not equal."
            )

        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim

        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim)
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim, n_embd) if self.prefix_hidden_dim is not None else nn.Identity()
        )

        gpt_config = GPT2Config(
            vocab_size=vocab_size,
            n_positions=n_positions,
            n_embd=n_embd,
            n_layer=n_layer,
            n_head=n_head,
            n_inner=n_inner,
            activation_function=activation_function,
            resid_pdrop=resid_pdrop,
            embd_pdrop=embd_pdrop,
            attn_pdrop=attn_pdrop,
            layer_norm_epsilon=layer_norm_epsilon,
            initializer_range=initializer_range,
            scale_attn_weights=scale_attn_weights,
            use_cache=use_cache,
            scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx,
            reorder_and_upcast_attn=reorder_and_upcast_attn,
        )
        self.transformer = GPT2LMHeadModel(gpt_config)
    def forward(
        self,
        input_ids: torch.Tensor,
        prefix_embeds: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
    ):
        embedding_text = self.transformer.transformer.wte(input_ids)
        hidden = self.encode_prefix(prefix_embeds)
        prefix_embeds = self.decode_prefix(hidden)
        embedding_cat = torch.cat((prefix_embeds, embedding_text), dim=1)

        if labels is not None:
            dummy_token = self.get_dummy_token(input_ids.shape[0], input_ids.device)
            labels = torch.cat((dummy_token, input_ids), dim=1)
        out = self.transformer(inputs_embeds=embedding_cat, labels=labels, attention_mask=attention_mask)
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out
    def get_dummy_token(self, batch_size: int, device: torch.device) -> torch.Tensor:
        return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)

    def encode(self, prefix):
        return self.encode_prefix(prefix)
    @torch.no_grad()
    def generate_captions(self, features, eos_token_id, device):
        features = torch.split(features, 1, dim=0)
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device))  # back to the clip feature
            # Only support beam search for now
            output_tokens, seq_lengths = self.generate_beam(
                input_embeds=feature, device=device, eos_token_id=eos_token_id
            )
            generated_tokens.append(output_tokens[0])
            generated_seq_lengths.append(seq_lengths[0])
        generated_tokens = torch.stack(generated_tokens)
        generated_seq_lengths = torch.stack(generated_seq_lengths)
        return generated_tokens, generated_seq_lengths
    @torch.no_grad()
    def generate_beam(
        self,
        input_ids=None,
        input_embeds=None,
        device=None,
        beam_size: int = 5,
        entry_length: int = 67,
        temperature: float = 1.0,
        eos_token_id: Optional[int] = None,
    ):
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size, device=device, dtype=torch.int)
        is_stopped = torch.zeros(beam_size, device=device, dtype=torch.bool)

        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids)

        for i in range(entry_length):
            outputs = self.transformer(inputs_embeds=generated)
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1).log()

            if scores is None:
                scores, next_tokens = logits.topk(beam_size, -1)
                generated = generated.expand(beam_size, *generated.shape[1:])
                next_tokens, scores = next_tokens.permute(1, 0), scores.squeeze(0)
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size, *tokens.shape[1:])
                    tokens = torch.cat((tokens, next_tokens), dim=1)
            else:
                logits[is_stopped] = -float(np.inf)
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average, next_tokens = scores_sum_average.view(-1).topk(beam_size, -1)
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1)
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens), dim=1)
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]

            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0], 1, -1)
            generated = torch.cat((generated, next_token_embed), dim=1)
            is_stopped = is_stopped + next_tokens.eq(stop_token_index).squeeze()
            if is_stopped.all():
                break

        scores = scores / seq_lengths
        order = scores.argsort(descending=True)
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts, dim=0)
        seq_lengths = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype)
        return output_texts, seq_lengths
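

# Hedged instantiation sketch with deliberately tiny hyperparameters.
# `PrefixLMTextDecoder` is the class repaired above, not a public API name,
# and this module's relative imports must resolve for it to run:
#
#     decoder = PrefixLMTextDecoder(
#         prefix_length=4, prefix_inner_dim=32, prefix_hidden_dim=16,
#         n_embd=32, n_layer=2, n_head=2,
#     )
#     prefix = torch.randn(1, 4, 32)
#     out, hidden = decoder(torch.tensor([[0, 1]]), prefix)  # hidden: (1, 4, 16)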
| 129
| 1
|
'''simple docstring'''
import flax.linen as nn
import jax
import jax.numpy as jnp
class FlaxUpsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states, shape=(batch, height * 2, width * 2, channels), method="nearest"
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxDownsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype
        )

    def __call__(self, hidden_states):
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype
        )

        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)

        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype
        )

        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut

        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels, kernel_size=(1, 1), strides=(1, 1), padding="VALID", dtype=self.dtype
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
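

# Hedged shape check for the upsampling block (illustrative sizes; runs on CPU
# and only exercises the repaired module definitions above):
if __name__ == "__main__":
    block = FlaxUpsample2D(out_channels=8)
    sample = jnp.zeros((1, 16, 16, 8))
    params = block.init(jax.random.PRNGKey(0), sample)
    assert block.apply(params, sample).shape == (1, 32, 32, 8)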
| 186
|
'''simple docstring'''
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(
    task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path
):
    # Initialise PyTorch model.
    # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set
    # reset_position_index_per_cell of TapasConfig to False.

    # initialize configuration from json file
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell

    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True

        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513

        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False

        # hparam_utils.py hparams
        config.huber_loss_delta = 36.4519
        config.cell_selection_preference = 0.903421
        config.answer_loss_cutoff = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141

        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f"Task {task} not supported.")

    print(f"Building PyTorch model from configuration: {config}")

    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)

    # Save pytorch-model (weights and configuration)
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Save tokenizer files
    print(f"Save tokenizer files to {pytorch_dump_path}")
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)

    print("Used relative position embeddings:", model.config.reset_position_index_per_cell)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--task', default='SQA', type=str, help='Model task for which to convert a checkpoint. Defaults to SQA.'
)
parser.add_argument(
'--reset_position_index_per_cell',
default=False,
action='store_true',
help='Whether to use relative position embeddings or not. Defaults to True.',
)
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--tapas_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained TAPAS model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
| 665
| 0
|
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10


def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Plain linear scan over array[left:right], used once the range is small."""
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative ternary search over a sorted array; returns an index or -1."""
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive ternary search over a sorted array; returns an index or -1."""
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result2 != -1:
        print(f"Iterative search: {target} found at positions: {result1}")
        print(f"Recursive search: {target} found at positions: {result2}")
    else:
        print("Not found")
| 184
|
from math import factorial, radians


def sin(angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10) -> float:
    """Approximate the sine of an angle given in degrees with a truncated Taylor series."""
    # Simplify the angle to lie within one full turn.
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)

    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees)

    result = angle_in_radians
    a = 3
    b = -1

    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)

        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.

    return round(result, rounded_values_count)


if __name__ == "__main__":
    __import__("doctest").testmod()
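
# Hedged sanity check (illustrative; compares the series against math.sin):
#
#     from math import sin as math_sin
#     assert abs(sin(37.0) - math_sin(radians(37.0))) < 1e-8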
| 184
| 1
|
'''simple docstring'''
deps = {
'''Pillow''': '''Pillow''',
'''accelerate''': '''accelerate>=0.11.0''',
'''compel''': '''compel==0.1.8''',
'''black''': '''black~=23.1''',
'''datasets''': '''datasets''',
'''filelock''': '''filelock''',
'''flax''': '''flax>=0.4.1''',
'''hf-doc-builder''': '''hf-doc-builder>=0.3.0''',
'''huggingface-hub''': '''huggingface-hub>=0.13.2''',
'''requests-mock''': '''requests-mock==1.10.0''',
'''importlib_metadata''': '''importlib_metadata''',
'''invisible-watermark''': '''invisible-watermark''',
'''isort''': '''isort>=5.5.4''',
'''jax''': '''jax>=0.2.8,!=0.3.2''',
'''jaxlib''': '''jaxlib>=0.1.65''',
'''Jinja2''': '''Jinja2''',
'''k-diffusion''': '''k-diffusion>=0.0.12''',
'''torchsde''': '''torchsde''',
'''note_seq''': '''note_seq''',
'''librosa''': '''librosa''',
'''numpy''': '''numpy''',
'''omegaconf''': '''omegaconf''',
'''parameterized''': '''parameterized''',
'''protobuf''': '''protobuf>=3.20.3,<4''',
'''pytest''': '''pytest''',
'''pytest-timeout''': '''pytest-timeout''',
'''pytest-xdist''': '''pytest-xdist''',
'''ruff''': '''ruff>=0.0.241''',
'''safetensors''': '''safetensors''',
'''sentencepiece''': '''sentencepiece>=0.1.91,!=0.1.92''',
'''scipy''': '''scipy''',
'''onnx''': '''onnx''',
'''regex''': '''regex!=2019.12.17''',
'''requests''': '''requests''',
'''tensorboard''': '''tensorboard''',
'''torch''': '''torch>=1.4''',
'''torchvision''': '''torchvision''',
'''transformers''': '''transformers>=4.25.1''',
'''urllib3''': '''urllib3<=2.0.0''',
}
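

# Hedged usage sketch: the table maps a package name to its pinned requirement
# string, e.g. when assembling extras for a setup script.
if __name__ == "__main__":
    print(deps["accelerate"])  # "accelerate>=0.11.0"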
| 172
|
'''simple docstring'''
from itertools import permutations
def is_substring_divisible(num: tuple) -> bool:
    """Check the Project Euler 43 substring divisibility property of a pandigital tuple."""
    if num[3] % 2 != 0:
        return False

    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False

    if num[5] % 5 != 0:
        return False

    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    """Sum all 0-9 pandigital numbers with the substring divisibility property."""
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 172
| 1
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        attention_window=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window

        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2

        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            attention_window=self.attention_window,
            **self.config_updates,
        )
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]],
            axis=-1,
        )
        inputs_dict["global_attention_mask"] = global_attention_mask
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_led_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }
@require_tf
class TFLEDModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFLEDForConditionalGeneration,
            "feature-extraction": TFLEDModel,
            "summarization": TFLEDForConditionalGeneration,
            "text2text-generation": TFLEDForConditionalGeneration,
            "translation": TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFLEDModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LEDConfig)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        inputs_dict["global_attention_mask"] = tf.zeros_like(inputs_dict["attention_mask"])
        num_global_attn_indices = 2
        inputs_dict["global_attention_mask"] = tf.where(
            tf.range(self.model_tester.seq_length)[None, :] < num_global_attn_indices,
            1,
            inputs_dict["global_attention_mask"],
        )

        config.return_dict = True
        seq_length = self.model_tester.seq_length
        encoder_seq_length = self.model_tester.encoder_seq_length

        def check_decoder_attentions_output(outputs):
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [t.numpy() for t in outputs.encoder_attentions]
            global_attentions = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertEqual(len(global_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)

    @unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing.")
    def test_xla_mode(self):
        pass

    def test_headmasking(self):
        # TODO: Head-masking not yet implement
        pass
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)


TOLERANCE = 1e-4
@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384").led

        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, 768)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3)

    def test_inference_with_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")

        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, model.config.vocab_size)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3, rtol=1e-3)
| 672
|
'''simple docstring'''
import math
def is_prime(number: int) -> bool:
    """Return True when `number` is prime, testing odd divisors up to sqrt(number)."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    """Return the next prime strictly above `factor * value` (pass desc=True to search downward)."""
    value = factor * value
    first_value_val = value

    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1

    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
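

# Hedged usage sketch (illustrative values, verified by hand):
if __name__ == "__main__":
    assert next_prime(14) == 17             # counts upward until a prime is hit
    assert next_prime(14, desc=True) == 13  # desc=True searches downward instead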
| 672
| 1
|
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )

    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name

    return name
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    # fmt: off
    model_name_to_url = {
        "focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
        "focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
        "focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
        "focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
        "focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
        "focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
        "focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
        "focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
        "focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
        "focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
    }
    # fmt: on

    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)

    # verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")

    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )

    original_pixel_values = image_transforms(image).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)

    outputs = model(**inputs)

    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])

    print("First values of logits:", outputs.logits[0, :3])

    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(f"{model_name}")
        processor.push_to_hub(f"{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="focalnet-tiny",
type=str,
help="Name of the FocalNet model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub.",
)
    args = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 310
|
"""LayoutLMv3 model configuration."""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
logger = logging.get_logger(__name__)

LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}
class LayoutLMv3Config(PretrainedConfig):
    model_type = "layoutlmv3"

    def __init__(self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-5, pad_token_id=1, bos_token_id=0, eos_token_id=2, max_2d_position_embeddings=1024, coordinate_size=128, shape_size=128, has_relative_attention_bias=True, rel_pos_bins=32, max_rel_pos=128, rel_2d_pos_bins=64, max_rel_2d_pos=256, has_spatial_attention_bias=True, text_embed=True, visual_embed=True, input_size=224, num_channels=3, patch_size=16, classifier_dropout=None, **kwargs):
        super().__init__(
            vocab_size=vocab_size, hidden_size=hidden_size, num_hidden_layers=num_hidden_layers, num_attention_heads=num_attention_heads, intermediate_size=intermediate_size, hidden_act=hidden_act, hidden_dropout_prob=hidden_dropout_prob, attention_probs_dropout_prob=attention_probs_dropout_prob, max_position_embeddings=max_position_embeddings, type_vocab_size=type_vocab_size, initializer_range=initializer_range, layer_norm_eps=layer_norm_eps, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout
class LayoutLMv3OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.12")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # The order of inputs is different for question answering and sequence classification
        if self.task in ["question-answering", "sequence-classification"]:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ]
            )
        else:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels"}),
                ]
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(self, processor: "ProcessorMixin", batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional["TensorType"] = None, num_channels: int = 3, image_width: int = 40, image_height: int = 40) -> Mapping[str, Any]:
        # disable OCR so that the dummy text/boxes below are used as-is
        setattr(processor.image_processor, "apply_ocr", False)
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0)
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add)
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[" ".join([processor.tokenizer.unk_token]) * seq_length]] * batch_size
        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(processor(dummy_image, text=dummy_text, boxes=dummy_bboxes, return_tensors=framework))
        return inputs
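# Minimal usage sketch (the processor instance and the PYTORCH tensor type are assumptions):
#   config = LayoutLMv3Config()
#   onnx_config = LayoutLMv3OnnxConfig(config, task="question-answering")
#   dummy_inputs = onnx_config.generate_dummy_inputs(processor, framework=TensorType.PYTORCH)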
| 310
| 1
|
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnet1D
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
MODELS_MAP = {
"gwf-440k": {
"url": "https://model-server.zqevans2.workers.dev/gwf-440k.ckpt",
"sample_rate": 48_000,
"sample_size": 65_536,
},
"jmann-small-190k": {
"url": "https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt",
"sample_rate": 48_000,
"sample_size": 65_536,
},
"jmann-large-580k": {
"url": "https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt",
"sample_rate": 48_000,
"sample_size": 131_072,
},
"maestro-uncond-150k": {
"url": "https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt",
"sample_rate": 16_000,
"sample_size": 65_536,
},
"unlocked-uncond-250k": {
"url": "https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt",
"sample_rate": 16_000,
"sample_size": 65_536,
},
"honk-140k": {
"url": "https://model-server.zqevans2.workers.dev/honk-140k.ckpt",
"sample_rate": 16_000,
"sample_size": 65_536,
},
}
def alpha_sigma_to_t(alpha, sigma):
    return torch.atan2(sigma, alpha) / math.pi * 2


def get_crash_schedule(t):
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)
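# Sanity check (illustrative): t=0 gives sigma=0, alpha=1 -> returns 0; t=1 gives sigma=1, alpha=0 -> returns 1,
# so the crash schedule pins the endpoints and only bends the interior of the timestep curve.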
class Object(object):
    pass


class DiffusionUncond(nn.Module):
    def __init__(self, global_args):
        super().__init__()
        self.diffusion = DiffusionAttnUnet1D(global_args, n_attn_layers=4)
        self.diffusion_ema = deepcopy(self.diffusion)
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)
def download(model_name):
    url = MODELS_MAP[model_name]["url"]
    os.system(f"wget {url} ./")
    return f"./{model_name}.ckpt"
DOWN_NUM_TO_LAYER = {
"1": "resnets.0",
"2": "attentions.0",
"3": "resnets.1",
"4": "attentions.1",
"5": "resnets.2",
"6": "attentions.2",
}
UP_NUM_TO_LAYER = {
"8": "resnets.0",
"9": "attentions.0",
"10": "resnets.1",
"11": "attentions.1",
"12": "resnets.2",
"13": "attentions.2",
}
MID_NUM_TO_LAYER = {
"1": "resnets.0",
"2": "attentions.0",
"3": "resnets.1",
"4": "attentions.1",
"5": "resnets.2",
"6": "attentions.2",
"8": "resnets.3",
"9": "attentions.3",
"10": "resnets.4",
"11": "attentions.4",
"12": "resnets.5",
"13": "attentions.5",
}
DEPTH_0_TO_LAYER = {
"0": "resnets.0",
"1": "resnets.1",
"2": "resnets.2",
"4": "resnets.0",
"5": "resnets.1",
"6": "resnets.2",
}
RES_CONV_MAP = {
"skip": "conv_skip",
"main.0": "conv_1",
"main.1": "group_norm_1",
"main.3": "conv_2",
"main.4": "group_norm_2",
}
ATTN_MAP = {
"norm": "group_norm",
"qkv_proj": ["query", "key", "value"],
"out_proj": ["proj_attn"],
}
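# "qkv_proj" keys fan out into three separate diffusers keys (query/key/value), while every other
# mapping above is a plain one-to-one rename.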
def convert_resconv_naming(name):
if name.startswith('''skip''' ):
return name.replace('''skip''' , RES_CONV_MAP['''skip'''] )
# name has to be of format main.{digit}
if not name.startswith('''main.''' ):
raise ValueError(f"ResConvBlock error with {name}" )
return name.replace(name[:6] , RES_CONV_MAP[name[:6]] )
def convert_attn_naming(name):
    for key, value in ATTN_MAP.items():
        if name.startswith(key) and not isinstance(value, list):
            return name.replace(key, value)
        elif name.startswith(key):
            return [name.replace(key, v) for v in value]
    raise ValueError(f"Attn error with {name}")
def rename(input_string, max_depth=13):
    string = input_string
    if string.split(".")[0] == "timestep_embed":
        return string.replace("timestep_embed", "time_proj")
    depth = 0
    if string.startswith("net.3."):
        depth += 1
        string = string[6:]
    elif string.startswith("net."):
        string = string[4:]
    while string.startswith("main.7."):
        depth += 1
        string = string[7:]
    if string.startswith("main."):
        string = string[5:]
    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]
    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = "mid_block"
    elif depth > 0 and int(layer_num) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = f"down_blocks.{depth}"
    elif depth > 0 and int(layer_num) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - depth - 1}"
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - 1}" if int(layer_num) > 3 else "down_blocks.0"
    if not string_left.startswith("."):
        raise ValueError(f"Naming error with {input_string} and string_left: {string_left}.")
    string_left = string_left[1:]
    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left)
    elif "attentions" in new_layer:
        new_string_left = convert_attn_naming(string_left)
        string_left = new_string_left
    if not isinstance(string_left, list):
        new_string = prefix + "." + new_layer + "." + string_left
    else:
        new_string = [prefix + "." + new_layer + "." + s for s in string_left]
    return new_string
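# Example: rename("timestep_embed.weight") -> "time_proj.weight" (handled by the early return above);
# keys wrapped in "net.3." / "main.7." prefixes gain one depth level each before being routed to
# down_blocks / mid_block / up_blocks.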
def rename_orig_weights(state_dict):
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith("kernel"):
            # up- and downsample layers, don't have trainable weights
            continue
        new_k = rename(k)
        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k, list):
            new_state_dict = transform_conv_attns(new_state_dict, new_k, v)
        else:
            new_state_dict[new_k] = v
    return new_state_dict
def transform_conv_attns(new_state_dict, new_k, v):
    if len(new_k) == 1:
        if len(v.shape) == 3:
            # weight
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3):
            if len(v.shape) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
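# Illustrative shapes: a fused qkv Conv1d weight of shape (3*C, C, 1) is split into three (C, C)
# Linear weights by slicing rows [0:C], [C:2C], [2C:3C] and dropping the trailing kernel dimension.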
def main(args):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model_name = args.model_path.split("/")[-1].split(".")[0]
    if not os.path.isfile(args.model_path):
        assert (
            model_name == args.model_path
        ), f"Make sure to provide one of the official model names {MODELS_MAP.keys()}"
        args.model_path = download(model_name)
    sample_rate = MODELS_MAP[model_name]["sample_rate"]
    sample_size = MODELS_MAP[model_name]["sample_size"]
    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0
    diffusers_model = UNet1DModel(sample_size=sample_size, sample_rate=sample_rate)
    diffusers_state_dict = diffusers_model.state_dict()
    orig_model = DiffusionUncond(config)
    orig_model.load_state_dict(torch.load(args.model_path, map_location=device)["state_dict"])
    orig_model = orig_model.diffusion_ema.eval()
    orig_model_state_dict = orig_model.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict)
    renamed_minus_diffusers = set(renamed_state_dict.keys()) - set(diffusers_state_dict.keys())
    diffusers_minus_renamed = set(diffusers_state_dict.keys()) - set(renamed_state_dict.keys())
    assert len(renamed_minus_diffusers) == 0, f"Problem with {renamed_minus_diffusers}"
    assert all(k.endswith("kernel") for k in list(diffusers_minus_renamed)), f"Problem with {diffusers_minus_renamed}"
    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), f"Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"
        if key == "time_proj.weight":
            value = value.squeeze()
        diffusers_state_dict[key] = value
    diffusers_model.load_state_dict(diffusers_state_dict)
    steps = 100
    seed = 33
    scheduler = IPNDMScheduler(num_train_timesteps=steps)
    generator = torch.manual_seed(seed)
    noise = torch.randn([1, 2, config.sample_size], generator=generator).to(device)
    t = torch.linspace(1, 0, steps + 1, device=device)[:-1]
    step_list = get_crash_schedule(t)
    pipe = DanceDiffusionPipeline(unet=diffusers_model, scheduler=scheduler)
    generator = torch.manual_seed(33)
    audio = pipe(num_inference_steps=steps, generator=generator).audios
    generated = sampling.iplms_sample(orig_model, noise, step_list, {})
    generated = generated.clamp(-1, 1)
    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()
    if args.save:
        pipe.save_pretrained(args.checkpoint_path)
    print("Diff sum", diff_sum)
    print("Diff max", diff_max)
    assert diff_max < 1e-3, f"Diff max: {diff_max} is too much :-/"
    print(f"Conversion for {model_name} successful!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.')
parser.add_argument(
'--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.'
)
parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.')
    args = parser.parse_args()
main(args)
| 718
|
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
logger = logging.get_logger(__name__)


class DPTFeatureExtractor(DPTImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
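# Behaviour note: instantiating DPTFeatureExtractor emits the FutureWarning above and otherwise
# behaves exactly like DPTImageProcessor.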
| 212
| 0
|
"""Video classification pipeline."""
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VideoClassificationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "decord")
        self.check_model_type(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING)

    def _sanitize_parameters(self, top_k=None, num_frames=None, frame_sampling_rate=None):
        preprocess_params = {}
        if frame_sampling_rate is not None:
            preprocess_params["frame_sampling_rate"] = frame_sampling_rate
        if num_frames is not None:
            preprocess_params["num_frames"] = num_frames
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, videos: Union[str, List[str]], **kwargs):
        return super().__call__(videos, **kwargs)

    def preprocess(self, video, num_frames=None, frame_sampling_rate=1):
        if num_frames is None:
            num_frames = self.model.config.num_frames
        if video.startswith("http://") or video.startswith("https://"):
            video = BytesIO(requests.get(video).content)
        videoreader = VideoReader(video)
        videoreader.seek(0)
        start_idx = 0
        end_idx = num_frames * frame_sampling_rate - 1
        indices = np.linspace(start_idx, end_idx, num=num_frames, dtype=np.int64)
        video = videoreader.get_batch(indices).asnumpy()
        video = list(video)
        model_inputs = self.image_processor(video, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 119
|
"""Convert DPT checkpoints from the original repository."""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_dpt_config(checkpoint_url):
    config = DPTConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)
    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.num_labels = 150
        repo_id = 'huggingface/label-files'
        filename = 'ade20k-id2label.json'
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type='dataset')), 'r'))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]
    return config, expected_shape
def remove_ignore_keys_(state_dict):
    ignore_keys = ['pretrained.model.head.weight', 'pretrained.model.head.bias']
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace('pretrained.model', 'dpt.encoder')
    if "pretrained.model" in name:
        name = name.replace('pretrained.model', 'dpt.embeddings')
    if "patch_embed" in name:
        name = name.replace('patch_embed', 'patch_embeddings')
    if "pos_embed" in name:
        name = name.replace('pos_embed', 'position_embeddings')
    if "attn.proj" in name:
        name = name.replace('attn.proj', 'attention.output.dense')
    if "proj" in name and "project" not in name:
        name = name.replace('proj', 'projection')
    if "blocks" in name:
        name = name.replace('blocks', 'layer')
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1', 'intermediate.dense')
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2', 'output.dense')
    if "norm1" in name:
        name = name.replace('norm1', 'layernorm_before')
    if "norm2" in name:
        name = name.replace('norm2', 'layernorm_after')
    if "scratch.output_conv" in name:
        name = name.replace('scratch.output_conv', 'head')
    if "scratch" in name:
        name = name.replace('scratch', 'neck')
    if "layer1_rn" in name:
        name = name.replace('layer1_rn', 'convs.0')
    if "layer2_rn" in name:
        name = name.replace('layer2_rn', 'convs.1')
    if "layer3_rn" in name:
        name = name.replace('layer3_rn', 'convs.2')
    if "layer4_rn" in name:
        name = name.replace('layer4_rn', 'convs.3')
    if "refinenet" in name:
        layer_idx = int(name[len('neck.refinenet') : len('neck.refinenet') + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f'refinenet{layer_idx}', f'fusion_stage.layers.{abs(layer_idx-4)}')
    if "out_conv" in name:
        name = name.replace('out_conv', 'projection')
    if "resConfUnit1" in name:
        name = name.replace('resConfUnit1', 'residual_layer1')
    if "resConfUnit2" in name:
        name = name.replace('resConfUnit2', 'residual_layer2')
    if "conv1" in name:
        name = name.replace('conv1', 'convolution1')
    if "conv2" in name:
        name = name.replace('conv2', 'convolution2')
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess1.0.project.0', 'neck.reassemble_stage.readout_projects.0.0')
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess2.0.project.0', 'neck.reassemble_stage.readout_projects.1.0')
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess3.0.project.0', 'neck.reassemble_stage.readout_projects.2.0')
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess4.0.project.0', 'neck.reassemble_stage.readout_projects.3.0')
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace('pretrained.act_postprocess1.3', 'neck.reassemble_stage.layers.0.projection')
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace('pretrained.act_postprocess1.4', 'neck.reassemble_stage.layers.0.resize')
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace('pretrained.act_postprocess2.3', 'neck.reassemble_stage.layers.1.projection')
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace('pretrained.act_postprocess2.4', 'neck.reassemble_stage.layers.1.resize')
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace('pretrained.act_postprocess3.3', 'neck.reassemble_stage.layers.2.projection')
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace('pretrained.act_postprocess4.3', 'neck.reassemble_stage.layers.3.projection')
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace('pretrained.act_postprocess4.4', 'neck.reassemble_stage.layers.3.resize')
    if "pretrained" in name:
        name = name.replace('pretrained', 'dpt')
    if "bn" in name:
        name = name.replace('bn', 'batch_norm')
    if "head" in name:
        name = name.replace('head', 'head.head')
    if "encoder.norm" in name:
        name = name.replace('encoder.norm', 'layernorm')
    if "auxlayer" in name:
        name = name.replace('auxlayer', 'auxiliary_head.head')
    return name
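# Example: rename_key("scratch.refinenet4.resConfUnit1.conv1.weight")
#   -> "neck.fusion_stage.layers.0.residual_layer1.convolution1.weight"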
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.weight')
        in_proj_bias = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.bias')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[: config.hidden_size, :]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.query.bias'] = in_proj_bias[: config.hidden_size]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.key.bias'] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.value.bias'] = in_proj_bias[-config.hidden_size :]
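# Illustrative: with hidden_size=1024 the fused qkv weight of shape (3072, 1024) is sliced into
# query rows [0:1024], key rows [1024:2048] and value rows [2048:3072]; biases are sliced the same way.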
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name):
    """Copy/paste/tweak the original DPT checkpoint weights into the HF Transformers format."""
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu')
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)
    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if 'ade' in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()
    # Check outputs on an image
    size = 480 if 'ade' in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)
    image = prepare_img()
    encoding = image_processor(image, return_tensors='pt')
    # forward pass
    outputs = model(**encoding).logits if 'ade' in checkpoint_url else model(**encoding).predicted_depth
    # Assert logits
    expected_slice = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]])
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]])
    assert outputs.shape == torch.Size(expected_shape)
    assert (
        torch.allclose(outputs[0, 0, :3, :3], expected_slice, atol=1e-4)
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3], expected_slice)
    )
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'Saving model to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'Saving image processor to {pytorch_dump_folder_path}')
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print('Pushing model to hub...')
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization='nielsr', commit_message='Add model', use_temp_dir=True, )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization='nielsr', commit_message='Add image processor', use_temp_dir=True, )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt''',
type=str,
help='''URL of the original DPT checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
parser.add_argument(
'''--model_name''',
default='''dpt-large''',
type=str,
help='''Name of the model, in case you\'re pushing to the hub.''',
)
    args = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 119
| 1
|
"""Tests for the PyTorch LayoutLMv3 model."""
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaModelTester:
    def __init__(self, parent, batch_size=2, num_channels=3, image_size=4, patch_size=2, text_seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=36, num_hidden_layers=3, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, coordinate_size=6, shape_size=6, num_labels=3, num_choices=4, scope=None, range_bbox=1000):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)
        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal (x0 <= x1 and y0 <= y1)
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)
        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, coordinate_size=self.coordinate_size, shape_size=self.shape_size, input_size=self.image_size, patch_size=self.patch_size)
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels):
        model = LayoutLMvaModel(config=config)
        model.to(torch_device)
        model.eval()
        # text + image
        result = model(input_ids, pixel_values=pixel_values)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        # text only
        result = model(input_ids)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size))
        # image only
        result = model(pixel_values=pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size))
    def create_and_check_for_sequence_classification(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels):
        config.num_labels = self.num_labels
        model = LayoutLMvaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels):
        config.num_labels = self.num_labels
        model = LayoutLMvaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))
    def create_and_check_for_question_answering(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels):
        model = LayoutLMvaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LayoutLMvaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False
    all_model_classes = (
        (
            LayoutLMvaModel,
            LayoutLMvaForSequenceClassification,
            LayoutLMvaForTokenClassification,
            LayoutLMvaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {'document-question-answering': LayoutLMvaForQuestionAnswering, 'feature-extraction': LayoutLMvaModel}
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        # `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
        # embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
        # the sequence dimension of the text embedding only.
        # (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
        return True
    def setUp(self):
        self.model_tester = LayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
                if isinstance(v, torch.Tensor) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=torch.long, device=torch_device)
        return inputs_dict
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class LayoutLMvaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)
        input_ids = torch.tensor([[1, 2]])
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)
        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device), bbox=bbox.to(torch_device), pixel_values=pixel_values.to(torch_device))
        # verify the last hidden states
        expected_shape = torch.Size((1, 199, 768))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
| 711
|
"""Deprecated SageMakerTrainer shim kept for backward compatibility."""
import warnings
from ..trainer import Trainer
from ..utils import logging
logger = logging.get_logger(__name__)


class SageMakerTrainer(Trainer):
    def __init__(self, args=None, **kwargs):
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
| 11
| 0
|
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFCvtConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "embed_dim"))
        self.parent.assertTrue(hasattr(config, "num_heads"))
class TFCvtModelTester:
    def __init__(self, parent, batch_size=13, image_size=64, num_channels=3, embed_dim=[16, 48, 96], num_heads=[1, 3, 6], depth=[1, 2, 10], patch_sizes=[7, 3, 3], patch_stride=[4, 2, 2], patch_padding=[2, 1, 1], stride_kv=[2, 2, 2], cls_token=[False, False, True], attention_drop_rate=[0.0, 0.0, 0.0], initializer_range=0.02, layer_norm_eps=1e-12, is_training=True, use_labels=True, num_labels=2):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            # create a random int32 tensor of given shape
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFCvtModel(config=config)
        result = model(pixel_values, training=False)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFCvtForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFCvtModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': TFCvtModel, 'image-classification': TFCvtForImageClassification}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFCvtModelTester(self)
        self.config_tester = TFCvtConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason="Cvt does not output attentions" )
def lowerCAmelCase__ ( self: int ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip(reason="Cvt does not use inputs_embeds" )
def lowerCAmelCase__ ( self: Tuple ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="Cvt does not support input and output embeddings" )
def lowerCAmelCase__ ( self: Tuple ) -> Dict:
'''simple docstring'''
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , )
    def test_dataset_conversion(self):
        super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , )
@slow
    def test_keras_fit(self):
        super().test_keras_fit()
@unittest.skip(reason="Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8" )
def lowerCAmelCase__ ( self: List[str] ) -> int:
'''simple docstring'''
UpperCAmelCase_ =tf.keras.mixed_precision.Policy("mixed_float16" )
tf.keras.mixed_precision.set_global_policy(_lowerCAmelCase )
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy("float32" )
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depth)
            self.assertEqual(len(hidden_states), expected_num_layers)
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFCvtModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFCvtModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # forward pass
        outputs = model(**inputs)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([0.9285, 0.9015, -0.3150])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
| 54
|
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def handle_test_results(test_results):
    expressions = test_results.split(" ")
    failed = 0
    success = 0
    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]
    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])
    return failed, success, time_spent
def extract_first_line_failure(failures_short_lines):
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            in_error = True
            file = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            failures[file] = line
            in_error = False
    return failures
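# Maps each "_ [doctest]" header line in the short-failures report to the first following line whose
# leading token is not a number (taken as the one-line error summary for that doctest).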
class Message:
    def __init__(self, title: str, doc_test_results: Dict):
        self.title = title
        self._time_spent = doc_test_results["time_spent"].split(",")[0]
        self.n_success = doc_test_results["success"]
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures
        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results
    @property
    def time(self) -> str:
        time_spent = [self._time_spent]
        total_secs = 0
        for time in time_spent:
            time_parts = time.split(":")
            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]
            hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3600 + minutes * 60 + seconds
        hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"
@property
    def header(self) -> Dict:
        return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
    def no_failures(self) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": F'🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
    def failures(self) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
F'There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'
F' {self.time}.'
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
    def category_failures(self) -> Dict:
        line_length = 40
        category_failures = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(v, dict)}
        report = ""
        for category, failures in category_failures.items():
            if len(failures) == 0:
                continue
            if report != "":
                report += "\n\n"
            report += F'*{category} failures*:'.ljust(line_length // 2).rjust(line_length // 2) + "\n"
            report += "`"
            report += "`\n`".join(failures)
            report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": F'The following examples had failures:\n\n\n{report}\n',
},
}
@property
    def payload(self) -> str:
        blocks = [self.header]
        if self.n_failures > 0:
            blocks.append(self.failures)
        if self.n_failures > 0:
            blocks.extend([self.category_failures])
        if self.n_failures == 0:
            blocks.append(self.no_failures)
        return json.dumps(blocks)
    @staticmethod
    def error_out():
        payload = [
            {
                "type": "section",
                "text": {
                    "type": "plain_text",
                    "text": "There was an issue running the tests.",
                },
                "accessory": {
                    "type": "button",
                    "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                    "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
                },
            }
        ]

        print("Sending the following payload")
        # `payload` is already a list of blocks, so it can be serialized directly.
        print(json.dumps({"blocks": payload}))

        client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            text="There was an issue running the tests.",
            blocks=payload,
        )
    def post(self):
        print("Sending the following payload")
        print(json.dumps({"blocks": json.loads(self.payload)}))

        text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."

        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            blocks=self.payload,
            text=text,
        )
    def get_reply_blocks(self, job_name, job_link, failures, text):
        failures_text = ""
        for key, value in failures.items():
            value = value[:200] + " [Truncated]" if len(value) > 250 else value
            failures_text += f"*{key}*\n_{value}_\n\n"

        title = job_name
        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}

        if job_link is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }

        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]
    def post_reply(self):
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made.")

        job_link = self.doc_test_results.pop("job_link")
        self.doc_test_results.pop("failures")
        self.doc_test_results.pop("success")
        self.doc_test_results.pop("time_spent")

        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result["failures"]):
                text = f"*Num failures* :{len(job_result['failed'])} \n"
                failures = job_result["failures"]
                blocks = self.get_reply_blocks(job, job_link, failures, text=text)

                print("Sending the following reply")
                print(json.dumps({"blocks": blocks}))

                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
                    text=f"Results for {job}",
                    blocks=blocks,
                    thread_ts=self.thread_ts["ts"],
                )

                time.sleep(1)
def get_job_links():
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}

    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)

    return {}
def retrieve_artifact(name: str):
    _artifact = {}

    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name, file), encoding="utf-8") as f:
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(name, file)}.") from e

    return _artifact
def retrieve_available_artifacts():
    class Artifact:
        def __init__(self, name: str):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path: str):
            self.paths.append({"name": self.name, "path": path})

    _available_artifacts = {}

    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)

        _available_artifacts[artifact_name].add_path(directory)

    return _available_artifacts
if __name__ == "__main__":
__lowercase : str =get_job_links()
__lowercase : Dict =retrieve_available_artifacts()
__lowercase : Optional[int] =collections.OrderedDict(
[
("""*.py""", """API Examples"""),
("""*.md""", """MD Examples"""),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
__lowercase : Any ={
v: {
"""failed""": [],
"""failures""": {},
}
for v in docs.values()
}
# Link to the GitHub Action job
__lowercase : Tuple =github_actions_job_links.get("""run_doctests""")
__lowercase : int =available_artifacts["""doc_tests_gpu_test_reports"""].paths[0]
__lowercase : str =retrieve_artifact(artifact_path["""name"""])
if "stats" in artifact:
__lowercase , __lowercase , __lowercase : Tuple =handle_test_results(artifact["""stats"""])
__lowercase : int =failed
__lowercase : int =success
__lowercase : str =time_spent[1:-1] + """, """
__lowercase : str =extract_first_line_failure(artifact["""failures_short"""])
for line in artifact["summary_short"].split("""\n"""):
if re.search("""FAILED""", line):
__lowercase : int =line.replace("""FAILED """, """""")
__lowercase : List[Any] =line.split()[0].replace("""\n""", """""")
if "::" in line:
__lowercase , __lowercase : Any =line.split("""::""")
else:
__lowercase , __lowercase : Dict =line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
__lowercase : Optional[int] =docs[file_regex]
doc_test_results[category]["failed"].append(test)
__lowercase : Tuple =all_failures[test] if test in all_failures else """N/A"""
__lowercase : Optional[int] =failure
break
__lowercase : Optional[int] =Message("""🤗 Results of the doc tests.""", doc_test_results)
message.post()
message.post_reply()
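
# Illustrative note (not part of the original script, shape is an assumption
# inferred from the code above): by the time `Message` is constructed,
# `doc_test_results` is expected to look roughly like
#
#     {
#         "job_link": "https://github.com/huggingface/transformers/actions/runs/...",
#         "failures": 2,
#         "success": 90,
#         "time_spent": "0:04:12, ",
#         "API Examples": {"failed": ["test_a"], "failures": {"test_a": "ValueError: ..."}},
#         "MD Examples": {"failed": [], "failures": {}},
#     }
#
# `handle_test_results` and the Slack `client` are assumed to be defined
# earlier in this file.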
| 54
| 1
|
"""simple docstring"""
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        image_size=32,
        patch_size=16,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        backbone_out_indices=[0, 1, 2, 3],
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        backbone_featmap_shape=[1, 384, 24, 24],
        is_hybrid=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [96, 192, 384, 768],
            "num_groups": 2,
        }

        return DPTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            backbone_out_indices=self.backbone_out_indices,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            is_hybrid=self.is_hybrid,
            backbone_config=backbone_config,
            backbone_featmap_shape=self.backbone_featmap_shape,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = DPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_depth_estimation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "depth-estimation": DPTForDepthEstimation,
            "feature-extraction": DPTModel,
            "image-segmentation": DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DPT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_depth_estimation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
    def test_training(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.use_cache = False
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break

            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            model = DPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_raise_readout_type(self):
        # We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        config.readout_type = "add"
        with self.assertRaises(ValueError):
            _ = DPTForDepthEstimation(config)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
@slow
class DPTModelIntegrationTest(unittest.TestCase):
    def test_inference_depth_estimation(self):
        image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
        model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").to(torch_device)

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
            predicted_depth = outputs.predicted_depth

        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384))
        self.assertEqual(predicted_depth.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100, expected_slice, atol=1e-4))
| 258
|
"""simple docstring"""
# using dfs for finding eulerian path traversal
def _A ( __lowercase , __lowercase , __lowercase , __lowercase=None ):
"""simple docstring"""
lowerCamelCase__ = (path or []) + [u]
for v in graph[u]:
if visited_edge[u][v] is False:
lowerCamelCase__ , lowerCamelCase__ = True, True
lowerCamelCase__ = dfs(__lowercase , __lowercase , __lowercase , __lowercase )
return path
def _A ( __lowercase , __lowercase ):
"""simple docstring"""
lowerCamelCase__ = 0
lowerCamelCase__ = -1
for i in range(__lowercase ):
if i not in graph.keys():
continue
if len(graph[i] ) % 2 == 1:
odd_degree_nodes += 1
lowerCamelCase__ = i
if odd_degree_nodes == 0:
return 1, odd_node
if odd_degree_nodes == 2:
return 2, odd_node
return 3, odd_node
def _A ( __lowercase , __lowercase ):
"""simple docstring"""
lowerCamelCase__ = [[False for _ in range(max_node + 1 )] for _ in range(max_node + 1 )]
lowerCamelCase__ , lowerCamelCase__ = check_circuit_or_path(__lowercase , __lowercase )
if check == 3:
print("""graph is not Eulerian""" )
print("""no path""" )
return
lowerCamelCase__ = 1
if check == 2:
lowerCamelCase__ = odd_node
print("""graph has a Euler path""" )
if check == 1:
print("""graph has a Euler cycle""" )
lowerCamelCase__ = dfs(__lowercase , __lowercase , __lowercase )
print(__lowercase )
def _A ( ):
"""simple docstring"""
lowerCamelCase__ = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
lowerCamelCase__ = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
lowerCamelCase__ = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
lowerCamelCase__ = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
lowerCamelCase__ = {
1: [],
2: []
# all degree is zero
}
lowerCamelCase__ = 10
check_euler(__lowercase , __lowercase )
check_euler(__lowercase , __lowercase )
check_euler(__lowercase , __lowercase )
check_euler(__lowercase , __lowercase )
check_euler(__lowercase , __lowercase )
if __name__ == "__main__":
main()
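
# Worked example (added for illustration, not part of the original module):
# by Euler's theorem, a connected graph has an Euler cycle iff every vertex
# has even degree, and an Euler path iff exactly two vertices have odd degree.
# In g1 above, vertex 1 has degree 3 and vertex 5 has degree 1, so exactly two
# odd-degree vertices exist and check_circuit_or_path(g1, 10) returns (2, 5):
# an Euler path exists and must start at an odd-degree vertex.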
| 258
| 1
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_autoformer": [
        "AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AutoformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_autoformer"] = [
        "AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AutoformerForPrediction",
        "AutoformerModel",
        "AutoformerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
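
# Usage sketch (added for illustration): with the _LazyModule pattern, the
# heavy torch-backed submodule is only imported on first attribute access,
# so an import like
#
#     from transformers.models.autoformer import AutoformerConfig
#
# stays cheap until AutoformerConfig (or a model class) is actually used.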
| 1
|
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class BenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
            only_pretrain_model=True,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_torchscript(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            torchscript=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(torch_device == "cpu", "Cant do half precision")
    def test_inference_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            fp16=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_model_no_architectures(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        # set architectures equal to `None`
        config.architectures = None
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_train_no_configs_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            fp16=True,
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                save_to_csv=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"),
                train_memory_csv_file=os.path.join(tmp_dir, "train_mem.csv"),
                inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"),
                train_time_csv_file=os.path.join(tmp_dir, "train_time.csv"),
                env_info_csv_file=os.path.join(tmp_dir, "env.csv"),
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                log_filename=os.path.join(tmp_dir, "log.txt"),
                log_print=True,
                trace_memory_line_by_line=True,
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            _check_summary_is_not_empty(result.train_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
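
# Minimal usage sketch (added for illustration, mirrors the tests above): the
# benchmark utilities are driven entirely by PyTorchBenchmarkArguments.
#
#     args = PyTorchBenchmarkArguments(
#         models=["sshleifer/tiny-gpt2"], inference=True, training=False,
#         sequence_lengths=[8], batch_sizes=[1], multi_process=False,
#     )
#     results = PyTorchBenchmark(args).run()
#     print(results.time_inference_result)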
| 251
| 0
|
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")["module"]

    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path)
    # add an entry for [MASK2]
    entity_vocab["[MASK2]"] = max(entity_vocab.values()) + 1
    config.entity_vocab_size += 1

    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "r") as f:
        tokenizer_config = json.load(f)
    tokenizer_config["tokenizer_class"] = "MLukeTokenizer"
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "w") as f:
        json.dump(tokenizer_config, f)

    with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(["@"])[0]
    ent2_init_index = tokenizer.convert_tokens_to_ids(["#"])[0]

    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[ent_init_index].unsqueeze(0)
    ent2_emb = word_emb[ent2_init_index].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0)
        ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0)
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_mask_emb = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_embeddings.entity_embeddings.weight"] = torch.cat([entity_emb, entity_mask_emb])
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict["entity_predictions.bias"]
    entity_mask_bias = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_predictions.bias"] = torch.cat([entity_prediction_bias, entity_mask_bias])

    model = LukeForMaskedLM(config=config).eval()

    state_dict.pop("entity_predictions.decoder.weight")
    state_dict.pop("lm_head.decoder.weight")
    state_dict.pop("lm_head.decoder.bias")
    hf_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head") or key.startswith("entity_predictions")):
            hf_state_dict["luke." + key] = state_dict[key]
        else:
            hf_state_dict[key] = state_dict[key]

    missing_keys, unexpected_keys = model.load_state_dict(hf_state_dict, strict=False)

    if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}")
    if set(missing_keys) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"Unexpected missing_keys: {missing_keys}")

    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()

    # Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    span = (0, 9)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 768))
        expected_slice = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    text = "Tokyo is the capital of <mask>."
    span = (24, 30)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    input_ids = encoding["input_ids"][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>"))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert "Japan" == tokenizer.decode(predicted_id)

    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:")][0] == "en:Japan"

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)


def load_original_entity_vocab(entity_vocab_path):
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]

    data = [json.loads(line) for line in open(entity_vocab_path)]

    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id
    return new_mapping
if __name__ == "__main__":
UpperCAmelCase_ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
UpperCAmelCase_ : int = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
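
# Example invocation (illustrative; the concrete file names are assumptions
# based on the argument help strings above, not verified paths):
#
#     python convert_mluke_original_pytorch_checkpoint_to_pytorch.py \
#         --checkpoint_path mluke/pytorch_model.bin \
#         --metadata_path mluke/metadata.json \
#         --entity_vocab_path mluke/entity_vocab.jsonl \
#         --pytorch_dump_folder_path ./mluke-base \
#         --model_size base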
| 712
|
"""simple docstring"""
def _lowerCAmelCase(a : int ) -> bool:
if number < 0:
raise ValueError('''number must not be negative''' )
return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
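
# How the bit trick works (explanatory note, not part of the original module):
# subtracting 1 from a power of two flips its single set bit and sets all the
# lower bits, so n & (n - 1) == 0, e.g. 8 (0b1000) & 7 (0b0111) == 0, while
# 6 (0b110) & 5 (0b101) == 0b100 != 0. Note that 0 also satisfies the formula
# (0 & -1 == 0), so callers that must exclude 0 need an explicit extra check.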
| 165
| 0
|
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
logger = datasets.utils.logging.get_logger(__name__)


class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    """BuilderConfig for AudioFolder."""

    drop_labels: bool = None
    drop_metadata: bool = None


class AudioFolder(folder_based_builder.FolderBasedBuilder):
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = "audio"
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")


AUDIO_EXTENSIONS = [
""".aiff""",
""".au""",
""".avr""",
""".caf""",
""".flac""",
""".htk""",
""".svx""",
""".mat4""",
""".mat5""",
""".mpc2k""",
""".ogg""",
""".paf""",
""".pvf""",
""".raw""",
""".rf64""",
""".sd2""",
""".sds""",
""".ircam""",
""".voc""",
""".w64""",
""".wav""",
""".nist""",
""".wavex""",
""".wve""",
""".xi""",
""".mp3""",
""".opus""",
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
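
# Usage sketch (added for illustration): with the extensions registered above,
# an audio dataset laid out as `data_dir/<label>/<file>.wav` can be loaded via
#
#     from datasets import load_dataset
#     ds = load_dataset("audiofolder", data_dir="path/to/audio")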
| 12
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)


class EncoderDecoderConfig(PretrainedConfig):
    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
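
# Usage sketch (added for illustration): compose a seq2seq config from two
# sub-configs; the classmethod marks the decoder for cross-attention.
#
#     from transformers import BertConfig
#     config = EncoderDecoderConfig.from_encoder_decoder_configs(BertConfig(), BertConfig())
#     assert config.decoder.is_decoder and config.decoder.add_cross_attention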
| 560
| 0
|
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class StoppingCriteriaTestCase(unittest.TestCase):
    def _get_tensors(self, length):
        batch_size = 3
        vocab_size = 250

        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores

    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ]
        )
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)

    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))

        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))

    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)

        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)

        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)

        self.assertEqual(len(stopping_criteria), 1)
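
# Usage sketch (added for illustration, not part of the test file): inside
# generation loops the criteria list is evaluated after every decoding step,
# conceptually:
#
#     criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20)])
#     if criteria(input_ids, scores):
#         ...  # stop decoding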
| 604
|
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    """Find a root of `function` starting from `starting_point` via Newton-Raphson."""
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))

    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(
                prev_guess
            )
        else:
            raise ZeroDivisionError("Could not find root") from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess

        prev_guess = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(f"""The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5J)}""")
# Find value of e
print(
"The root of log(y) - 1 = 0 is ",
f"""{newton_raphson('log(y) - 1', 2, variable='y')}""",
)
# Exponential Roots
print(
    "The root of exp(x) - 1 = 0 is",
    f"""{newton_raphson('exp(x) - 1', 10, precision=0.005)}""",
)
# Find root of cos(x)
print(f"""The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}""")
| 604
| 1
|
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/conditional-detr-resnet-50": (
        "https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
    ),
}


class ConditionalDetrConfig(PretrainedConfig):
    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class ConditionalDetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
| 655
|
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])

# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"


def _read32(bytestream):
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]


@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_images(f):
    """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError("Invalid magic number %d in MNIST image file: %s" % (magic, f.name))
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data


@deprecated(None, "Please use tf.one_hot on tensors.")
def _dense_to_one_hot(labels_dense, num_classes):
    """Convert class labels from scalars to one-hot vectors."""
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot


@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_labels(f, one_hot=False, num_classes=10):
    """Extract the labels into a 1D uint8 numpy array [index]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError("Invalid magic number %d in MNIST label file: %s" % (magic, f.name))
        num_items = _read32(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels
class _DataSet:
    @deprecated(
        None,
        "Please use alternatives such as official/mnist/_DataSet.py"
        " from tensorflow/models.",
    )
    def __init__(
        self,
        images,
        labels,
        fake_data=False,
        one_hot=False,
        dtype=dtypes.float32,
        reshape=True,
        seed=None,
    ):
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f"images.shape: {images.shape} labels.shape: {labels.shape}"
            self._num_examples = images.shape[0]

            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(images.shape[0], images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0

    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed

    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
@deprecated(__magic_name__ , "Please write your own downloading logic." )
def _A ( __magic_name__ , __magic_name__ , __magic_name__ ):
if not gfile.Exists(__magic_name__ ):
gfile.MakeDirs(__magic_name__ )
lowercase__ = os.path.join(__magic_name__ , __magic_name__ )
if not gfile.Exists(__magic_name__ ):
urllib.request.urlretrieve(__magic_name__ , __magic_name__ ) # noqa: S310
with gfile.GFile(__magic_name__ ) as f:
lowercase__ = f.size()
print("Successfully downloaded" , __magic_name__ , __magic_name__ , "bytes." )
return filepath
@deprecated(
    None, "Please use alternatives such as:" " tensorflow_datasets.load('mnist')")
def read_data_sets(train_dir, fake_data=False, one_hot=False, dtype=dtypes.float32,
                   reshape=True, validation_size=5000, seed=None,
                   source_url=DEFAULT_SOURCE_URL):
    if fake_data:
        def fake():
            return _DataSet(
                [], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)
    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL
    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"
    local_file = _maybe_download(
        train_images_file, train_dir, source_url + train_images_file)
    with gfile.Open(local_file, "rb") as f:
        train_images = _extract_images(f)
    local_file = _maybe_download(
        train_labels_file, train_dir, source_url + train_labels_file)
    with gfile.Open(local_file, "rb") as f:
        train_labels = _extract_labels(f, one_hot=one_hot)
    local_file = _maybe_download(
        test_images_file, train_dir, source_url + test_images_file)
    with gfile.Open(local_file, "rb") as f:
        test_images = _extract_images(f)
    local_file = _maybe_download(
        test_labels_file, train_dir, source_url + test_labels_file)
    with gfile.Open(local_file, "rb") as f:
        test_labels = _extract_labels(f, one_hot=one_hot)
    if not 0 <= validation_size <= len(train_images):
        msg = (
            "Validation size should be between 0 and "
            f"{len(train_images)}. Received: {validation_size}.")
        raise ValueError(msg)
    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]
    options = {"dtype": dtype, "reshape": reshape, "seed": seed}
    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)
    return _Datasets(train=train, validation=validation, test=test)
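# Editor's sketch (not part of the original file): a minimal example of how
# this deprecated loader was typically driven. The data directory is a
# hypothetical placeholder and network access to DEFAULT_SOURCE_URL is assumed.
def _demo_read_data_sets():
    mnist = read_data_sets("/tmp/mnist_data", one_hot=True, validation_size=5000)
    batch_images, batch_labels = mnist.train.next_batch(100)
    # With reshape=True and one_hot=True the shapes are (100, 784) and (100, 10).
    print(batch_images.shape, batch_labels.shape)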
| 655
| 1
|
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class WhisperProcessor(ProcessorMixin):
    """simple docstring"""

    feature_extractor_class = "WhisperFeatureExtractor"
    tokenizer_class = "WhisperTokenizer"
    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)
    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")
        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def get_prompt_ids(self, text, return_tensors="np"):
        return self.tokenizer.get_prompt_ids(text, return_tensors=return_tensors)
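# Editor's sketch (not part of the original file): a hedged round trip through
# the processor above. The checkpoint name is an assumption; any Whisper
# checkpoint with a matching feature extractor and tokenizer works the same way.
def _demo_whisper_processor():
    import numpy as np

    processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
    waveform = np.zeros(16000, dtype=np.float32)  # one second of silence
    inputs = processor(audio=waveform, sampling_rate=16000, return_tensors="pt")
    labels = processor(text="hello world").input_ids  # tokenizer-only path
    print(inputs.input_features.shape, len(labels))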
| 228
|
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a random float32 tensor as a nested Python list."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
@require_torch
class SpeechTaFeatureExtractionTester(unittest.TestCase):
"""simple docstring"""
    def __init__(self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, feature_size=1, padding_value=0.0, sampling_rate=16000, do_normalize=True, num_mel_bins=80, hop_length=16, win_length=64, win_function="hann_window", fmin=80, fmax=7600, mel_floor=1e-10, return_attention_mask=True):
__UpperCamelCase = parent
__UpperCamelCase = batch_size
__UpperCamelCase = min_seq_length
__UpperCamelCase = max_seq_length
__UpperCamelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__UpperCamelCase = feature_size
__UpperCamelCase = padding_value
__UpperCamelCase = sampling_rate
__UpperCamelCase = do_normalize
__UpperCamelCase = num_mel_bins
__UpperCamelCase = hop_length
__UpperCamelCase = win_length
__UpperCamelCase = win_function
__UpperCamelCase = fmin
__UpperCamelCase = fmax
__UpperCamelCase = mel_floor
__UpperCamelCase = return_attention_mask
    def prepare_feat_extract_dict(self):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
    def prepare_inputs_for_target(self, equal_length=False, numpify=False):
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.num_mel_bins)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.num_mel_bins))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
class SpeechTaFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    """simple docstring"""

    feature_extraction_class = SpeechTaFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = SpeechTaFeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))
def A ( self : List[Any] )-> List[Any]:
# Tests that all call wrap to encode_plus and batch_encode_plus
__UpperCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
__UpperCamelCase = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
__UpperCamelCase = [np.asarray(A_ ) for speech_input in speech_inputs]
# Test not batched input
__UpperCamelCase = feat_extract(speech_inputs[0] , return_tensors="np" ).input_values
__UpperCamelCase = feat_extract(np_speech_inputs[0] , return_tensors="np" ).input_values
self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) )
# Test batched
__UpperCamelCase = feat_extract(A_ , return_tensors="np" ).input_values
__UpperCamelCase = feat_extract(A_ , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(A_ , A_ ):
self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) )
def A ( self : Union[str, Any] )-> List[str]:
__UpperCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__UpperCamelCase = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
__UpperCamelCase = ["longest", "max_length", "do_not_pad"]
__UpperCamelCase = [None, 16_00, None]
for max_length, padding in zip(A_ , A_ ):
__UpperCamelCase = feat_extract(A_ , padding=A_ , max_length=A_ , return_tensors="np" )
__UpperCamelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_00] )
self.assertTrue(input_values[0][8_00:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[1][:10_00] )
self.assertTrue(input_values[0][10_00:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[2][:12_00] )
def A ( self : int )-> Optional[Any]:
__UpperCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__UpperCamelCase = range(8_00 , 14_00 , 2_00 )
__UpperCamelCase = [floats_list((1, x) )[0] for x in lengths]
__UpperCamelCase = ["longest", "max_length", "do_not_pad"]
__UpperCamelCase = [None, 16_00, None]
for max_length, padding in zip(A_ , A_ ):
__UpperCamelCase = feat_extract(A_ , max_length=A_ , padding=A_ )
__UpperCamelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_00] )
self._check_zero_mean_unit_variance(input_values[1][:10_00] )
self._check_zero_mean_unit_variance(input_values[2][:12_00] )
def A ( self : str )-> Tuple:
__UpperCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__UpperCamelCase = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
__UpperCamelCase = feat_extract(
A_ , truncation=A_ , max_length=10_00 , padding="max_length" , return_tensors="np" )
__UpperCamelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_00] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def A ( self : Union[str, Any] )-> Dict:
__UpperCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__UpperCamelCase = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
__UpperCamelCase = feat_extract(
A_ , truncation=A_ , max_length=10_00 , padding="longest" , return_tensors="np" )
__UpperCamelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_00] )
self._check_zero_mean_unit_variance(input_values[1, :10_00] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 10_00) )
__UpperCamelCase = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
__UpperCamelCase = feat_extract(
A_ , truncation=A_ , max_length=20_00 , padding="longest" , return_tensors="np" )
__UpperCamelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_00] )
self._check_zero_mean_unit_variance(input_values[1, :10_00] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 12_00) )
def A ( self : List[str] )-> int:
__UpperCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__UpperCamelCase = np.random.rand(1_00 ).astype(np.floataa )
__UpperCamelCase = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
__UpperCamelCase = feature_extractor.pad([{"input_values": inputs}] , return_tensors="np" )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
__UpperCamelCase = feature_extractor.pad([{"input_values": inputs}] , return_tensors="pt" )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def A ( self : Tuple )-> List[str]:
# Tests that all call wrap to encode_plus and batch_encode_plus
__UpperCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
__UpperCamelCase = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
__UpperCamelCase = [np.asarray(A_ ) for speech_input in speech_inputs]
# Test feature size
__UpperCamelCase = feature_extractor(audio_target=A_ , padding=A_ , return_tensors="np" ).input_values
self.assertTrue(input_values.ndim == 3 )
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
# Test not batched input
__UpperCamelCase = feature_extractor(speech_inputs[0] , return_tensors="np" ).input_values
__UpperCamelCase = feature_extractor(np_speech_inputs[0] , return_tensors="np" ).input_values
self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) )
# Test batched
__UpperCamelCase = feature_extractor(A_ , return_tensors="np" ).input_values
__UpperCamelCase = feature_extractor(A_ , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(A_ , A_ ):
self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
__UpperCamelCase = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)]
__UpperCamelCase = np.asarray(A_ )
__UpperCamelCase = feature_extractor(A_ , return_tensors="np" ).input_values
__UpperCamelCase = feature_extractor(A_ , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(A_ , A_ ):
self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) )
def A ( self : Union[str, Any] )-> Tuple:
__UpperCamelCase = self.feat_extract_tester.prepare_inputs_for_target()
__UpperCamelCase = self.feature_extraction_class(**self.feat_extract_dict )
__UpperCamelCase = feat_extract.model_input_names[0]
__UpperCamelCase = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(A_ ) == len(A_ ) for x, y in zip(A_ , processed_features[input_name] ) ) )
__UpperCamelCase = self.feat_extract_tester.prepare_inputs_for_target(equal_length=A_ )
__UpperCamelCase = BatchFeature({input_name: speech_inputs} , tensor_type="np" )
__UpperCamelCase = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
__UpperCamelCase = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def A ( self : Dict )-> Union[str, Any]:
__UpperCamelCase = self.feat_extract_tester.prepare_inputs_for_target(equal_length=A_ )
__UpperCamelCase = self.feature_extraction_class(**self.feat_extract_dict )
__UpperCamelCase = feat_extract.model_input_names[0]
__UpperCamelCase = BatchFeature({input_name: speech_inputs} , tensor_type="pt" )
__UpperCamelCase = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
__UpperCamelCase = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def A ( self : Optional[int] )-> Optional[int]:
__UpperCamelCase = self.feature_extraction_class(**self.feat_extract_dict )
__UpperCamelCase = self.feat_extract_tester.prepare_inputs_for_target()
__UpperCamelCase = feat_extract.model_input_names[0]
__UpperCamelCase = BatchFeature({input_name: speech_inputs} )
__UpperCamelCase = feat_extract.num_mel_bins # hack!
__UpperCamelCase = feat_extract.pad(A_ , padding="longest" , return_tensors="np" )[input_name]
__UpperCamelCase = feat_extract.pad(A_ , padding="longest" , return_tensors="pt" )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 )
def A ( self : str )-> List[str]:
__UpperCamelCase = self.feat_extract_dict
__UpperCamelCase = True
__UpperCamelCase = self.feature_extraction_class(**A_ )
__UpperCamelCase = self.feat_extract_tester.prepare_inputs_for_target()
__UpperCamelCase = [len(A_ ) for x in speech_inputs]
__UpperCamelCase = feat_extract.model_input_names[0]
__UpperCamelCase = BatchFeature({input_name: speech_inputs} )
__UpperCamelCase = feat_extract.num_mel_bins # hack!
__UpperCamelCase = feat_extract.pad(A_ , padding="longest" , return_tensors="np" )
self.assertIn("attention_mask" , A_ )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , A_ )
def A ( self : Optional[Any] )-> Union[str, Any]:
__UpperCamelCase = self.feat_extract_dict
__UpperCamelCase = True
__UpperCamelCase = self.feature_extraction_class(**A_ )
__UpperCamelCase = self.feat_extract_tester.prepare_inputs_for_target()
__UpperCamelCase = [len(A_ ) for x in speech_inputs]
__UpperCamelCase = feat_extract.model_input_names[0]
__UpperCamelCase = BatchFeature({input_name: speech_inputs} )
__UpperCamelCase = min(A_ )
__UpperCamelCase = feat_extract.num_mel_bins # hack!
__UpperCamelCase = feat_extract.pad(
A_ , padding="max_length" , max_length=A_ , truncation=A_ , return_tensors="np" )
self.assertIn("attention_mask" , A_ )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
def A ( self : List[str] )-> List[Any]:
# fmt: off
__UpperCamelCase = torch.tensor(
[2.3_8_0_4e-0_3, 2.0_7_5_2e-0_3, 1.9_8_3_6e-0_3, 2.1_0_5_7e-0_3, 1.6_1_7_4e-0_3,
3.0_5_1_8e-0_4, 9.1_5_5_3e-0_5, 3.3_5_6_9e-0_4, 9.7_6_5_6e-0_4, 1.8_3_1_1e-0_3,
2.0_1_4_2e-0_3, 2.1_0_5_7e-0_3, 1.7_3_9_5e-0_3, 4.5_7_7_6e-0_4, -3.9_6_7_3e-0_4,
4.5_7_7_6e-0_4, 1.0_0_7_1e-0_3, 9.1_5_5_3e-0_5, 4.8_8_2_8e-0_4, 1.1_5_9_7e-0_3,
7.3_2_4_2e-0_4, 9.4_6_0_4e-0_4, 1.8_0_0_5e-0_3, 1.8_3_1_1e-0_3, 8.8_5_0_1e-0_4,
4.2_7_2_5e-0_4, 4.8_8_2_8e-0_4, 7.3_2_4_2e-0_4, 1.0_9_8_6e-0_3, 2.1_0_5_7e-0_3] )
# fmt: on
__UpperCamelCase = self._load_datasamples(1 )
__UpperCamelCase = SpeechTaFeatureExtractor()
__UpperCamelCase = feature_extractor(A_ , return_tensors="pt" ).input_values
        self.assertEqual(input_values.shape, (1, 93680))
self.assertTrue(torch.allclose(input_values[0, :30] , A_ , atol=1e-6 ) )
def A ( self : Optional[Any] )-> int:
# fmt: off
__UpperCamelCase = torch.tensor(
[-2.6_870, -3.0_104, -3.1_356, -3.5_352, -3.0_044, -3.0_353, -3.4_719, -3.6_777,
-3.1_520, -2.9_435, -2.6_553, -2.8_795, -2.9_944, -2.5_921, -3.0_279, -3.0_386,
-3.0_864, -3.1_291, -3.2_353, -2.7_444, -2.6_831, -2.7_287, -3.1_761, -3.1_571,
-3.2_726, -3.0_582, -3.1_007, -3.4_533, -3.4_695, -3.0_998] )
# fmt: on
__UpperCamelCase = self._load_datasamples(1 )
__UpperCamelCase = SpeechTaFeatureExtractor()
__UpperCamelCase = feature_extractor(audio_target=A_ , return_tensors="pt" ).input_values
        self.assertEqual(input_values.shape, (1, 366, 80))
self.assertTrue(torch.allclose(input_values[0, 0, :30] , A_ , atol=1e-4 ) )
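# Editor's sketch (not part of the original tests): the helper
# _check_zero_mean_unit_variance above boils down to this plain NumPy check on
# a normalized batch (here with an explicit abs() on the mean for symmetry).
def _demo_normalization_check(input_values):
    assert np.all(np.abs(np.mean(input_values, axis=0)) < 1e-3)
    assert np.all(np.abs(np.var(input_values, axis=0) - 1) < 1e-3)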
| 228
| 1
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class TvltProcessorTest(unittest.TestCase):
    """simple docstring"""

    def setUp(self):
        self.checkpoint = "ZinengTang/tvlt-base"
        self.tmpdirname = tempfile.mkdtemp()

    def get_image_processor(self, **kwargs):
        return TvltImageProcessor.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return TvltFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = TvltProcessor.from_pretrained(self.tmpdirname)
        self.assertIsInstance(processor.feature_extractor, TvltFeatureExtractor)
        self.assertIsInstance(processor.image_processor, TvltImageProcessor)

    def test_feature_extractor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        audio = np.ones([12000])
        audio_dict = feature_extractor(audio, return_tensors="np")
        input_processor = processor(audio=audio, return_tensors="np")
        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        images = np.ones([3, 224, 224])
        image_dict = image_processor(images, return_tensors="np")
        input_processor = processor(images=images, return_tensors="np")
        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        audio = np.ones([12000])
        images = np.ones([3, 224, 224])
        inputs = processor(audio=audio, images=images)
        self.assertListEqual(list(inputs.keys()), ["audio_values", "audio_mask", "pixel_values", "pixel_mask"])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        self.assertListEqual(
            processor.model_input_names,
            image_processor.model_input_names + feature_extractor.model_input_names,
            msg="`processor` and `image_processor`+`feature_extractor` model input names do not match",
        )
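# Editor's sketch (not part of the original tests): exercising TvltProcessor
# the way the tests above do. The checkpoint name comes from the test class;
# network access to the Hub is assumed.
def _demo_tvlt_processor():
    processor = TvltProcessor(
        image_processor=TvltImageProcessor.from_pretrained("ZinengTang/tvlt-base"),
        feature_extractor=TvltFeatureExtractor.from_pretrained("ZinengTang/tvlt-base"),
    )
    inputs = processor(audio=np.ones([12000]), images=np.ones([3, 224, 224]))
    print(sorted(inputs.keys()))  # audio_mask, audio_values, pixel_mask, pixel_values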
| 14
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase__ =logging.get_logger(__name__)
UpperCamelCase__ ={
'xlm-mlm-en-2048': 'https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json',
'xlm-mlm-ende-1024': 'https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json',
'xlm-mlm-enfr-1024': 'https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json',
'xlm-mlm-enro-1024': 'https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json',
'xlm-mlm-tlm-xnli15-1024': 'https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json',
'xlm-mlm-xnli15-1024': 'https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json',
'xlm-clm-enfr-1024': 'https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json',
'xlm-clm-ende-1024': 'https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json',
'xlm-mlm-17-1280': 'https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json',
'xlm-mlm-100-1280': 'https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json',
}
class XLMConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "xlm"
    attribute_map = {
        "hidden_size": "emb_dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
        "n_words": "vocab_size",  # For backward compatibility
    }

    def __init__(
        self,
        vocab_size=30145,
        emb_dim=2048,
        n_layers=12,
        n_heads=16,
        dropout=0.1,
        attention_dropout=0.1,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=1,
        use_lang_emb=True,
        max_position_embeddings=512,
        embed_init_std=2048**-0.5,
        layer_norm_eps=1e-12,
        init_std=0.02,
        bos_index=0,
        eos_index=1,
        pad_index=2,
        unk_index=3,
        mask_index=5,
        is_encoder=True,
        summary_type="first",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        mask_token_id=0,
        lang_id=0,
        pad_token_id=2,
        bos_token_id=0,
        **kwargs,
    ):
        # parameter names reconstructed from the body assignments below
        self.vocab_size = vocab_size
        self.emb_dim = emb_dim
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.use_lang_emb = use_lang_emb
        self.layer_norm_eps = layer_norm_eps
        self.bos_index = bos_index
        self.eos_index = eos_index
        self.pad_index = pad_index
        self.unk_index = unk_index
        self.mask_index = mask_index
        self.is_encoder = is_encoder
        self.max_position_embeddings = max_position_embeddings
        self.embed_init_std = embed_init_std
        self.init_std = init_std
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_proj_to_labels = summary_proj_to_labels
        self.summary_first_dropout = summary_first_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.mask_token_id = mask_token_id
        self.lang_id = lang_id
        if "n_words" in kwargs:
            self.n_words = kwargs["n_words"]
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, **kwargs)


class XLMOnnxConfig(OnnxConfig):
    """simple docstring"""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
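# Editor's sketch (not part of the original file): constructing a small config
# and reading a canonical attribute through the attribute_map defined above.
def _demo_xlm_config():
    config = XLMConfig(emb_dim=512, n_layers=6, n_heads=8)
    # `hidden_size` resolves to `emb_dim` and `num_hidden_layers` to `n_layers`.
    print(config.hidden_size, config.num_hidden_layers)  # 512 6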
| 249
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_blenderbot""": [
"""BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlenderbotConfig""",
"""BlenderbotOnnxConfig""",
],
"""tokenization_blenderbot""": ["""BlenderbotTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_blenderbot_fast"] = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot"] = [
"""BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlenderbotForCausalLM""",
"""BlenderbotForConditionalGeneration""",
"""BlenderbotModel""",
"""BlenderbotPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
"""TFBlenderbotForConditionalGeneration""",
"""TFBlenderbotModel""",
"""TFBlenderbotPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
"""FlaxBlenderbotForConditionalGeneration""",
"""FlaxBlenderbotModel""",
"""FlaxBlenderbotPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
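# Editor's sketch (not part of the original file): the _LazyModule wiring above
# defers the heavy torch/TF/Flax imports until an attribute is first accessed.
def _demo_lazy_import():
    import importlib

    mod = importlib.import_module("transformers.models.blenderbot")
    # Touching the attribute below is what triggers the real submodule import.
    print(mod.BlenderbotConfig.model_type)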
| 176
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : List[Any] = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    """simple docstring"""
    # NOTE: the config attribute targets below are reconstructed from the
    # upstream transformers DPT-hybrid conversion script.
    config = DPTConfig(embedding_type="hybrid")
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)
    # fixed: the original `if "nyu" or "midas" in checkpoint_url:` was always true
    if "nyu" in checkpoint_url or "midas" in checkpoint_url:
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [256, 512, 768, 768]
        config.num_labels = 150
        config.patch_size = 16
        expected_shape = (1, 384, 384)
        config.use_batch_norm_in_fusion_residual = False
        config.readout_type = "project"
    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.num_labels = 150
        config.patch_size = 16
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]
    return config, expected_shape
def remove_ignore_keys_(state_dict):
    """simple docstring"""
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
    """simple docstring"""
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name and "backbone" not in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name and "backbone" not in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")
    if "backbone" in name:
        name = name.replace("backbone", "backbone.bit.encoder")
    if ".." in name:
        name = name.replace("..", ".")
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "convolution" in name and "backbone" in name:
        name = name.replace("convolution", "conv")
    if "layer" in name and "backbone" in name:
        name = name.replace("layer", "layers")
    if "backbone.bit.encoder.bit" in name:
        name = name.replace("backbone.bit.encoder.bit", "backbone.bit")
    if "embedder.conv" in name:
        name = name.replace("embedder.conv", "embedder.convolution")
    if "backbone.bit.encoder.stem.norm" in name:
        name = name.replace("backbone.bit.encoder.stem.norm", "backbone.bit.embedder.norm")
    return name
def read_in_q_k_v(state_dict, config):
    """simple docstring"""
    # NOTE: target state_dict keys reconstructed from the upstream
    # transformers conversion script.
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def prepare_img():
    """simple docstring"""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name, show_prediction):
    """simple docstring"""
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = torch.load(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)
    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()
    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)
    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")
    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth
    if show_prediction:
        prediction = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1), size=(image.size[1], image.size[0]), mode="bicubic", align_corners=False,
            )
            .squeeze()
            .cpu()
            .numpy()
        )
        Image.fromarray((prediction / prediction.max()) * 255).show()
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        model.push_to_hub("ybelkada/dpt-hybrid-midas")
        image_processor.push_to_hub("ybelkada/dpt-hybrid-midas")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt""",
type=str,
help="""URL of the original DPT checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=False,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
parser.add_argument(
"""--model_name""",
default="""dpt-large""",
type=str,
help="""Name of the model, in case you're pushing to the hub.""",
)
parser.add_argument(
"""--show_prediction""",
action="""store_true""",
)
    args = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
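# Editor's sketch (not part of the original script): consuming a checkpoint
# converted by convert_dpt_checkpoint above. The local folder is a placeholder.
def _demo_converted_dpt(dump_folder="./dpt-hybrid"):
    model = DPTForDepthEstimation.from_pretrained(dump_folder)
    image_processor = DPTImageProcessor.from_pretrained(dump_folder)
    inputs = image_processor(prepare_img(), return_tensors="pt")
    with torch.no_grad():
        depth = model(**inputs).predicted_depth
    print(depth.shape)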
| 176
| 1
|
'''simple docstring'''
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Pix2StructProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "Pix2StructImageProcessor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")
    def __init__(self, image_processor, tokenizer):
        """simple docstring"""
        # target reconstructed from the upstream Pix2Struct processor
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
    def __call__(
        self,
        images=None,
        text=None,
        add_special_tokens=True,
        padding=False,
        truncation=None,
        max_length=None,
        max_patches=2048,
        stride=0,
        pad_to_multiple_of=None,
        return_attention_mask=None,
        return_overflowing_tokens=False,
        return_special_tokens_mask=False,
        return_offsets_mapping=False,
        return_token_type_ids=False,
        return_length=False,
        verbose=True,
        return_tensors=None,
        **kwargs,
    ) -> BatchEncoding:
        """simple docstring"""
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")
        # Get only text
        if images is None and not self.image_processor.is_vqa:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
            return text_encoding
        if not self.image_processor.is_vqa:
            # add pixel_values
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, **kwargs)
        else:
            # add pixel_values and bbox
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, header_text=text, **kwargs)
        if text is not None and not self.image_processor.is_vqa:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
            # decoder_* targets reconstructed from the upstream processor
            if "attention_mask" in text_encoding:
                text_encoding["decoder_attention_mask"] = text_encoding.pop("attention_mask")
            if "input_ids" in text_encoding:
                text_encoding["decoder_input_ids"] = text_encoding.pop("input_ids")
        else:
            text_encoding = None
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)
        return encoding_image_processor
    def batch_decode(self, *args, **kwargs):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """simple docstring"""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        """simple docstring"""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
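# Editor's sketch (not part of the original file): typical use of the processor
# above. The checkpoint name is an assumption; any Pix2Struct checkpoint that
# pairs this image processor with a T5 tokenizer behaves the same way.
def _demo_pix2struct_processor():
    import requests
    from PIL import Image

    processor = Pix2StructProcessor.from_pretrained("google/pix2struct-textcaps-base")
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, text="A picture of", return_tensors="pt")
    print(sorted(inputs.keys()))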
| 3
|
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints  # t5x is the original JAX T5 checkpoint library

from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder
MODEL = 'base_with_context'
def load_notes_encoder(weights, model):  # name restored from the call site in main()
"""simple docstring"""
__UpperCamelCase = nn.Parameter(torch.FloatTensor(weights['token_embedder']['embedding'] ) )
    __UpperCamelCase = nn.Parameter(
        torch.FloatTensor(weights['Embed_0']['embedding'] ), requires_grad=False )  # assumed frozen, per the upstream script
for lyr_num, lyr in enumerate(model.encoders ):
__UpperCamelCase = weights[F"""layers_{lyr_num}"""]
__UpperCamelCase = nn.Parameter(
torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) )
        attention_weights = ly_weight['attention']
__UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
__UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
__UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
__UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
__UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
__UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
__UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
__UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
__UpperCamelCase = nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) )
return model
def load_continuous_encoder(weights, model):  # name restored from the call site in main()
"""simple docstring"""
__UpperCamelCase = nn.Parameter(torch.FloatTensor(weights['input_proj']['kernel'].T ) )
    __UpperCamelCase = nn.Parameter(
        torch.FloatTensor(weights['Embed_0']['embedding'] ), requires_grad=False )  # assumed frozen, per the upstream script
for lyr_num, lyr in enumerate(model.encoders ):
__UpperCamelCase = weights[F"""layers_{lyr_num}"""]
        attention_weights = ly_weight['attention']
__UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
__UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
__UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
__UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
__UpperCamelCase = nn.Parameter(
torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) )
__UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
__UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
__UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
__UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
__UpperCamelCase = nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) )
return model
def load_decoder(weights, model):  # name restored from the call site in main()
"""simple docstring"""
__UpperCamelCase = nn.Parameter(torch.FloatTensor(weights['time_emb_dense0']['kernel'].T ) )
__UpperCamelCase = nn.Parameter(torch.FloatTensor(weights['time_emb_dense1']['kernel'].T ) )
    __UpperCamelCase = nn.Parameter(
        torch.FloatTensor(weights['Embed_0']['embedding'] ), requires_grad=False )  # assumed frozen, per the upstream script
__UpperCamelCase = nn.Parameter(
torch.FloatTensor(weights['continuous_inputs_projection']['kernel'].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
__UpperCamelCase = weights[F"""layers_{lyr_num}"""]
__UpperCamelCase = nn.Parameter(
torch.FloatTensor(ly_weight['pre_self_attention_layer_norm']['scale'] ) )
__UpperCamelCase = nn.Parameter(
torch.FloatTensor(ly_weight['FiLMLayer_0']['DenseGeneral_0']['kernel'].T ) )
        attention_weights = ly_weight['self_attention']
__UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
__UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
__UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
__UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
        attention_weights = ly_weight['MultiHeadDotProductAttention_0']
__UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
__UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
__UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
__UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
__UpperCamelCase = nn.Parameter(
torch.FloatTensor(ly_weight['pre_cross_attention_layer_norm']['scale'] ) )
__UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
__UpperCamelCase = nn.Parameter(
torch.FloatTensor(ly_weight['FiLMLayer_1']['DenseGeneral_0']['kernel'].T ) )
__UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
__UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
__UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
__UpperCamelCase = nn.Parameter(torch.FloatTensor(weights['decoder_norm']['scale'] ) )
__UpperCamelCase = nn.Parameter(torch.FloatTensor(weights['spec_out_dense']['kernel'].T ) )
return model
def main(args):
    """simple docstring"""
    ta_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path)
    ta_checkpoint = jnp.tree_util.tree_map(onp.array, ta_checkpoint)
    gin_overrides = [
        'from __gin__ import dynamic_registration',
        'from music_spectrogram_diffusion.models.diffusion import diffusion_utils',
        'diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0',
        'diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()',
    ]
    gin_file = os.path.join(args.checkpoint_path, '..', 'config.gin')
    gin_config = inference.parse_training_gin_file(gin_file, gin_overrides)
    synth_model = inference.InferenceModel(args.checkpoint_path, gin_config)
    scheduler = DDPMScheduler(beta_schedule='squaredcos_cap_v2', variance_type='fixed_large')
    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length['inputs'], vocab_size=synth_model.model.module.config.vocab_size, d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj='gated-gelu',
    )
    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims, targets_context_length=synth_model.sequence_length['targets_context'], d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj='gated-gelu',
    )
    decoder = T5FilmDecoder(
        input_dims=synth_model.audio_codec.n_dims, targets_length=synth_model.sequence_length['targets_context'], max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time, d_model=synth_model.model.module.config.emb_dim, num_layers=synth_model.model.module.config.num_decoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, dropout_rate=synth_model.model.module.config.dropout_rate,
    )
    notes_encoder = load_notes_encoder(ta_checkpoint['target']['token_encoder'], notes_encoder)
    continuous_encoder = load_continuous_encoder(ta_checkpoint['target']['continuous_encoder'], continuous_encoder)
    decoder = load_decoder(ta_checkpoint['target']['decoder'], decoder)
    melgan = OnnxRuntimeModel.from_pretrained('kashif/soundstream_mel_decoder')
    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder, continuous_encoder=continuous_encoder, decoder=decoder, scheduler=scheduler, melgan=melgan,
    )
    if args.save:
        pipe.save_pretrained(args.output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--output_path', default=None, type=str, required=True, help='Path to the converted model.')
    parser.add_argument(
        '--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.'
    )
    parser.add_argument(
        '--checkpoint_path',
        default=f'{MODEL}/checkpoint_500000',
        type=str,
        required=False,
        help='Path to the original jax model checkpoint.',
    )
    args = parser.parse_args()
    main(args)
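# Editor's sketch (not part of the original script): loading the pipeline that
# main() saves. The output path is a placeholder; MIDI-to-token preprocessing
# (see the music_spectrogram_diffusion package) is elided.
def _demo_spectrogram_pipeline(output_path="./spectrogram_diffusion"):
    pipe = SpectrogramDiffusionPipeline.from_pretrained(output_path)
    print(type(pipe).__name__)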
| 383
| 0
|
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
logger = logging.get_logger(__name__)


class MultiControlNetModel(ModelMixin):
    def __init__(self, controlnets):
        super().__init__()
        self.nets = nn.ModuleList(controlnets)

    def forward(
        self,
        sample,
        timestep,
        encoder_hidden_states,
        controlnet_cond,
        conditioning_scale,
        class_labels=None,
        timestep_cond=None,
        attention_mask=None,
        cross_attention_kwargs=None,
        guess_mode=False,
        return_dict=True,
    ) -> Union[ControlNetOutput, Tuple]:
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample, timestep, encoder_hidden_states, image, scale, class_labels, timestep_cond, attention_mask, cross_attention_kwargs, guess_mode, return_dict,
            )

            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample

        return down_block_res_samples, mid_block_res_sample

    def save_pretrained(self, save_directory, is_main_process=True, save_function=None, safe_serialization=False, variant=None):
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save, is_main_process=is_main_process, save_function=save_function, safe_serialization=safe_serialization, variant=variant,
            )

            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"

    @classmethod
    def from_pretrained(cls, pretrained_model_path, **kwargs):
        idx = 0
        controlnets = []

        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)

            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"

        logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.")

        if len(controlnets) == 0:
            raise ValueError(
                f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + '_0'}."
            )

        return cls(controlnets)
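# A minimal usage sketch (not from this file; the checkpoint ids are illustrative):
#
#   canny = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
#   pose = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose")
#   multi = MultiControlNetModel([canny, pose])
#   multi.save_pretrained("./multi_controlnet")   # writes ./multi_controlnet and ./multi_controlnet_1
#   restored = MultiControlNetModel.from_pretrained("./multi_controlnet")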
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator, batch_size=16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"],
        shuffle=False,
        collate_fn=collate_fn,
        batch_size=EVAL_BATCH_SIZE,
        drop_last=(accelerator.mixed_precision == "fp8"),
    )

    return train_dataloader, eval_dataloader


def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose "
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10. "
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
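# Launch sketch (standard `accelerate` CLI; the file name below is an assumption,
# use whatever this script is saved as):
#
#   accelerate config                                        # one-time interactive setup
#   accelerate launch nlp_example.py --mixed_precision fp16
#   python nlp_example.py --cpu                              # single-process debugging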
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = EsmConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, pad_token_id=1, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask,
    ):
        config.add_cross_attention = True

        model = TFEsmModel(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "encoder_hidden_states": encoder_hidden_states,
            "encoder_attention_mask": encoder_attention_mask,
        }
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs, encoder_hidden_states=encoder_hidden_states)

        # Also check the case where encoder outputs are not passed
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmForMaskedLM(config=config)
        result = model([input_ids, input_mask])
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFEsmModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEsmModel,
            "fill-mask": TFEsmForMaskedLM,
            "text-classification": TFEsmForSequenceClassification,
            "token-classification": TFEsmForTokenClassification,
            "zero-shot": TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFEsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_resize_token_embeddings(self):
        pass

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_save_load_after_resize_token_embeddings(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name, dict)
                for k, v in name.items():
                    assert isinstance(v, tf.Variable)
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [8.921518, -10.589814, -6.4671307],
                    [-6.3967156, -13.911377, -1.1211915],
                    [-7.781247, -13.951557, -3.740592],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-2))

    @slow
    def test_inference_no_head(self):
        model = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [0.14443092, 0.54125327, 0.3247739],
                    [0.30340484, 0.00526676, 0.31077722],
                    [0.32278043, -0.24987096, 0.3414628],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
mname_tiny = "tiny-wmt19-en-ru"
# Build
# borrowed from a test
vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
    merges_file = build_dir / VOCAB_FILES_NAMES["merges_file"]
with open(src_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, '''w''') as fp:
fp.write('''\n'''.join(merges))
    tokenizer = FSMTTokenizer(
langs=['''en''', '''ru'''],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
config = FSMTConfig(
langs=['''ru''', '''en'''],
src_vocab_size=10_00,
tgt_vocab_size=10_00,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
tiny_model = FSMTForConditionalGeneration(config)
print(f'''num of params {tiny_model.num_parameters()}''')
# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)
print('''test output:''', len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f'''Generated {mname_tiny}''')
# Upload
# transformers-cli upload tiny-wmt19-en-ru
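# Quick smoke test for the generated checkpoint (a sketch, assuming the save above
# succeeded):
#
#   tok = FSMTTokenizer.from_pretrained(mname_tiny)
#   model = FSMTForConditionalGeneration.from_pretrained(mname_tiny)
#   out = model.generate(**tok(["Making tiny model"], return_tensors="pt"))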
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import Speech2TextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")

if is_sentencepiece_available():
    import sentencepiece as sp


FR_CODE = 5
ES_CODE = 10
@require_sentencepiece
@require_tokenizers
class SpeechToTextTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = Speech2TextTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        spm_model = sp.SentencePieceProcessor()
        spm_model.Load(SAMPLE_SP)
        vocab = ["<s>", "<pad>", "</s>", "<unk>"]

        vocab += [spm_model.IdToPiece(id_) for id_ in range(len(spm_model))]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["spm_file"])

        tokenizer = Speech2TextTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1_001)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_001)

    def test_full_tokenizer(self):
        tokenizer = Speech2TextTokenizer.from_pretrained(self.tmpdirname)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [289, 50, 14, 174, 386],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."],
        )
@slow
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : List[Any] ={'input_ids': [[3791, 797, 31, 11, 64, 797, 31, 2429, 433, 12, 1176, 12, 20, 786, 915, 142, 2413, 240, 37, 3238, 797, 31, 11, 35, 93, 915, 142, 2413, 240, 37, 5540, 567, 1276, 93, 37, 610, 40, 62, 455, 657, 1042, 123, 780, 177, 37, 309, 241, 1298, 514, 20, 292, 2737, 114, 2469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3388, 511, 459, 4, 3555, 40, 321, 302, 705, 4, 3388, 511, 583, 326, 5, 5, 5, 62, 3310, 560, 177, 2680, 217, 1508, 32, 31, 853, 418, 64, 583, 511, 1605, 62, 35, 93, 560, 177, 2680, 217, 1508, 1521, 64, 583, 511, 519, 62, 20, 1515, 764, 20, 149, 261, 5625, 7972, 20, 5540, 567, 1276, 93, 3925, 1675, 11, 15, 802, 7972, 576, 217, 1508, 11, 35, 93, 1253, 2441, 15, 289, 652, 31, 416, 321, 3842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2681, 1153, 3434, 20, 5540, 37, 567, 126, 1253, 2441, 3376, 449, 210, 431, 1563, 177, 767, 5540, 11, 1203, 472, 11, 2953, 685, 285, 364, 706, 1153, 20, 6799, 20, 2869, 20, 4464, 126, 40, 2429, 20, 1040, 866, 2664, 418, 20, 318, 20, 1726, 186, 20, 265, 522, 35, 93, 2191, 4634, 20, 1040, 12, 6799, 15, 228, 2356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2575, 2666, 684, 1582, 1176, 12, 627, 149, 619, 20, 4902, 563, 11, 20, 149, 261, 3420, 2356, 174, 142, 4714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase__ , model_name='facebook/s2t-small-mustc-en-de-st' , revision='a14f04cf0776c02f62a8cb800cf7909e15ea23ad' , )
@require_sentencepiece
class SpeechToTextTokenizerMultilinguialTest(unittest.TestCase):
    checkpoint_name = "valhalla/s2t_mustc_multilinguial_medium"

    french_text = "C'est trop cool"
    spanish_text = "Esto es genial"

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: Speech2TextTokenizer = Speech2TextTokenizer.from_pretrained(cls.checkpoint_name)
        return cls

    def check_language_codes(self):
        self.assertEqual(self.tokenizer.lang_code_to_id["pt"], 4)
        self.assertEqual(self.tokenizer.lang_code_to_id["ru"], 6)
        self.assertEqual(self.tokenizer.lang_code_to_id["it"], 9)
        self.assertEqual(self.tokenizer.lang_code_to_id["de"], 11)

    def test_vocab_size(self):
        self.assertEqual(self.tokenizer.vocab_size, 10_000)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(ES_CODE, self.tokenizer.all_special_ids)
        generated_ids = [ES_CODE, 4, 1601, 47, 7647, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_spanish = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_spanish)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_tokenizer_adds_special_tokens(self):
        self.tokenizer.tgt_lang = "fr"
        encoded = self.tokenizer(self.french_text).input_ids
        self.assertEqual(encoded[0], FR_CODE)
        self.assertEqual(encoded[-1], self.tokenizer.eos_token_id)

    def test_tgt_lang_setter(self):
        self.tokenizer.tgt_lang = "fr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [FR_CODE])

        self.tokenizer.tgt_lang = "es"
        self.assertListEqual(self.tokenizer.prefix_tokens, [ES_CODE])
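# Condensed view of the prefix-token behaviour exercised above (a note, not a test):
# setting `tokenizer.tgt_lang = "fr"` makes `tokenizer.prefix_tokens == [FR_CODE]`
# (== [5]) and `"es"` yields `[ES_CODE]` (== [10]); the language id is prepended to
# every encoded sequence and the eos token id is appended.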
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True)
class ImageClassification(TaskTemplate):
    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
"""simple docstring"""
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
KT = TypeVar("KT")
VT = TypeVar("VT")


class Node(Generic[KT, VT]):
    def __init__(self, key: KT | str = "root", value: VT | None = None):
        self.key = key
        self.value = value
        self.forward: list[Node[KT, VT]] = []

    def __repr__(self) -> str:
        return f"Node({self.key}: {self.value})"

    @property
    def level(self) -> int:
        return len(self.forward)


class SkipList(Generic[KT, VT]):
    def __init__(self, p: float = 0.5, max_level: int = 16):
        self.head: Node[KT, VT] = Node[KT, VT]()
        self.level = 0
        self.p = p
        self.max_level = max_level
    def __str__(self) -> str:
        items = list(self)

        if len(items) == 0:
            return f"SkipList(level={self.level})"

        label_size = max((len(str(item)) for item in items), default=4)
        label_size = max(label_size, 4) + 4

        node = self.head
        lines = []

        forwards = node.forward.copy()
        lines.append(f"[{node.key}]".ljust(label_size, "-") + "* " * len(forwards))
        lines.append(" " * label_size + "| " * len(forwards))

        while len(node.forward) != 0:
            node = node.forward[0]

            lines.append(
                f"[{node.key}]".ljust(label_size, "-")
                + " ".join(str(n.key) if n.key == node.key else "|" for n in forwards)
            )
            lines.append(" " * label_size + "| " * len(forwards))
            forwards = node.forward

        lines.append("None".ljust(label_size) + "* " * len(forwards))
        return f"SkipList(level={self.level})\n" + "\n".join(lines)

    def __iter__(self):
        node = self.head

        while len(node.forward) != 0:
            yield node.forward[0].key
            node = node.forward[0]

    def random_level(self) -> int:
        level = 1
        while random() < self.p and level < self.max_level:
            level += 1

        return level
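    # Distribution note (not from the original file): with promotion probability p,
    # P(random_level() == k) == (1 - p) * p**(k - 1) for k < max_level, i.e. a capped
    # geometric distribution; this is what gives the skip list its expected
    # O(log n) search cost.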
    def _locate_node(self, key) -> tuple[Node[KT, VT] | None, list[Node[KT, VT]]]:
        # Nodes that refer, or should refer, to the output node.
        update_vector = []

        node = self.head

        for i in reversed(range(self.level)):
            # i < node.level - When node level is lesser than `i` decrement `i`.
            # node.forward[i].key < key - Jumping to node with key value higher
            #                             or equal to searched key would result
            #                             in skipping searched key.
            while i < node.level and node.forward[i].key < key:
                node = node.forward[i]
            # Each leftmost node (relative to searched node) will potentially have to
            # be updated.
            update_vector.append(node)

        update_vector.reverse()  # Note that we were inserting values in reverse order.

        # len(node.forward) != 0 - If current node doesn't contain any further
        #                          references then searched key is not present.
        # node.forward[0].key == key - Next node key should be equal to search key
        #                              if key is present.
        if len(node.forward) != 0 and node.forward[0].key == key:
            return node.forward[0], update_vector
        else:
            return None, update_vector
    def delete(self, key: KT):
        node, update_vector = self._locate_node(key)

        if node is not None:
            for i, update_node in enumerate(update_vector):
                # Remove or replace all references to removed node.
                if update_node.level > i and update_node.forward[i].key == key:
                    if node.level > i:
                        update_node.forward[i] = node.forward[i]
                    else:
                        update_node.forward = update_node.forward[:i]
    def insert(self, key: KT, value: VT):
        node, update_vector = self._locate_node(key)
        if node is not None:
            node.value = value
        else:
            level = self.random_level()

            if level > self.level:
                # After level increase we have to add additional nodes to head.
                for _ in range(self.level - 1, level):
                    update_vector.append(self.head)
                self.level = level

            new_node = Node(key, value)

            for i, update_node in enumerate(update_vector[:level]):
                # Change references to pass through new node.
                if update_node.level > i:
                    new_node.forward.append(update_node.forward[i])

                if update_node.level < i + 1:
                    update_node.forward.append(new_node)
                else:
                    update_node.forward[i] = new_node
    def find(self, key) -> VT | None:
        node, _ = self._locate_node(key)

        if node is not None:
            return node.value

        return None
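# A worked example of the public API (a sketch; it mirrors what the tests below
# assert rather than adding new behaviour):
#
#   sl = SkipList()
#   sl.insert("Key1", 3)
#   sl.insert("Key2", 12)
#   sl.find("Key2")      # -> 12
#   sl.delete("Key1")
#   sl.find("Key1")      # -> None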
def test_insert():
    skip_list = SkipList()
    skip_list.insert("Key1", 3)
    skip_list.insert("Key2", 12)
    skip_list.insert("Key3", 41)
    skip_list.insert("Key4", -19)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    assert len(all_values) == 4
    assert all_values["Key1"] == 3
    assert all_values["Key2"] == 12
    assert all_values["Key3"] == 41
    assert all_values["Key4"] == -19
def test_insert_overrides_existing_value():
    skip_list = SkipList()
    skip_list.insert("Key1", 10)
    skip_list.insert("Key1", 12)

    skip_list.insert("Key5", 7)
    skip_list.insert("Key7", 10)
    skip_list.insert("Key10", 5)

    skip_list.insert("Key7", 7)
    skip_list.insert("Key5", 5)
    skip_list.insert("Key10", 10)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    if len(all_values) != 4:
        print()
    assert len(all_values) == 4
    assert all_values["Key1"] == 12
    assert all_values["Key7"] == 7
    assert all_values["Key5"] == 5
    assert all_values["Key10"] == 10
def test_searching_empty_list_returns_none():
    skip_list = SkipList()
    assert skip_list.find("Some key") is None


def test_search():
    skip_list = SkipList()

    skip_list.insert("Key2", 20)
    assert skip_list.find("Key2") == 20

    skip_list.insert("Some Key", 10)
    skip_list.insert("Key2", 8)
    skip_list.insert("V", 13)

    assert skip_list.find("Y") is None
    assert skip_list.find("Key2") == 8
    assert skip_list.find("Some Key") == 10
    assert skip_list.find("V") == 13


def test_deleting_item_from_empty_list_do_nothing():
    skip_list = SkipList()
    skip_list.delete("Some key")

    assert len(skip_list.head.forward) == 0


def test_deleted_items_are_not_founded_by_find_method():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)

    skip_list.delete("V")
    skip_list.delete("Key2")

    assert skip_list.find("V") is None
    assert skip_list.find("Key2") is None
def test_delete_removes_only_given_key():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)

    skip_list.delete("V")
    assert skip_list.find("V") is None
    assert skip_list.find("X") == 14
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15

    skip_list.delete("X")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15

    skip_list.delete("Key1")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") == 15

    skip_list.delete("Key2")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") is None
def test_delete_doesnt_leave_dead_nodes():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 142)
    skip_list.insert("Key2", 15)

    skip_list.delete("X")

    def traverse_keys(node):
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node)

    assert len(set(traverse_keys(skip_list.head))) == 4
def test_iter_always_yields_sorted_values():
    def is_sorted(lst):
        return all(next_item >= item for item, next_item in zip(lst, lst[1:]))

    skip_list = SkipList()
    for i in range(10):
        skip_list.insert(i, i)
    assert is_sorted(list(skip_list))
    skip_list.delete(5)
    skip_list.delete(8)
    skip_list.delete(2)
    assert is_sorted(list(skip_list))
    skip_list.insert(-12, -12)
    skip_list.insert(77, 77)
    assert is_sorted(list(skip_list))
def pytests():
    for _ in range(100):
        # Repeat test 100 times due to the probabilistic nature of skip list
        # random values == random bugs
        test_insert()
        test_insert_overrides_existing_value()

        test_searching_empty_list_returns_none()
        test_search()

        test_deleting_item_from_empty_list_do_nothing()
        test_deleted_items_are_not_founded_by_find_method()
        test_delete_removes_only_given_key()
        test_delete_doesnt_leave_dead_nodes()

        test_iter_always_yields_sorted_values()
def main():
    """
    >>> pytests()
    """
    skip_list = SkipList()
    skip_list.insert(2, "2")
    skip_list.insert(4, "4")
    skip_list.insert(6, "4")
    skip_list.insert(4, "5")
    skip_list.insert(8, "4")
    skip_list.insert(9, "4")

    skip_list.delete(4)

    print(skip_list)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"


# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "transformers",
    os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
    submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
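# Illustration (not part of the original script) of what `_re_checkpoint` captures:
#   _re_checkpoint.findall("[bert-base-uncased](https://huggingface.co/bert-base-uncased)")
#   -> [("bert-base-uncased", "https://huggingface.co/bert-base-uncased")]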
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "CLIPConfigMixin",
    "DecisionTransformerConfigMixin",
    "EncoderDecoderConfigMixin",
    "RagConfigMixin",
    "SpeechEncoderDecoderConfigMixin",
    "VisionEncoderDecoderConfigMixin",
    "VisionTextDualEncoderConfigMixin",
}


def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        checkpoint_found = False

        # source code of `config_class`
        config_source = inspect.getsource(config_class)
        checkpoints = _re_checkpoint.findall(config_source)

        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint

            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break

        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")


if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
import math
from numpy import inf
from scipy.integrate import quad
def gamma(num: float) -> float:
    if num <= 0:
        raise ValueError("math domain error")

    return quad(integrand, 0, inf, args=(num))[0]


def integrand(x: float, z: float) -> float:
    return math.pow(x, z - 1) * math.exp(-x)
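# Quick numerical sanity checks (not in the original file; values are approximate
# because `quad` integrates numerically): Gamma(n) == (n - 1)! for positive integers.
#   gamma(5)    # ~ 24.0
#   gamma(0.5)  # ~ 1.7724538509 (= sqrt(pi))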
if __name__ == "__main__":
from doctest import testmod
testmod()
from ..utils import DummyObject, requires_backends
class DPMSolverSDEScheduler(metaclass=DummyObject):
    _backends = ["torch", "torchsde"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "torchsde"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class FlaxViTModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        config = ViTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range,
        )

        return config, pixel_values

    def create_and_check_model(self, config, pixel_values):
        model = FlaxViTModel(config=config)
        result = model(pixel_values)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.type_sequence_label_size
        model = FlaxViTForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxViTForImageClassification(config)

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxViTModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()

    def setUp(self) -> None:
        self.model_tester = FlaxViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/vit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)
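# Background for the JIT test above (an informal note): `jax.jit` traces the wrapped
# function once per input shape/dtype and caches the compiled executable, while
# `jax.disable_jit()` forces op-by-op eager execution, so comparing output shapes
# across the two runs is a cheap consistency check. Minimal illustration:
#
#   import jax, jax.numpy as jnp
#   f = jax.jit(lambda x: x * 2)
#   assert f(jnp.ones(3)).shape == (jnp.ones(3) * 2).shape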
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
    from ..t5.tokenization_t5 import T5Tokenizer
else:
    from ...utils.dummy_sentencepiece_objects import T5Tokenizer

MT5Tokenizer = T5Tokenizer

if is_tokenizers_available():
    from ..t5.tokenization_t5_fast import T5TokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import T5TokenizerFast

MT5TokenizerFast = T5TokenizerFast

_import_structure = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mt5"] = [
        "MT5EncoderModel",
        "MT5ForConditionalGeneration",
        "MT5ForQuestionAnswering",
        "MT5Model",
        "MT5PreTrainedModel",
        "MT5Stack",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mt5"] = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_mt5"] = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]
if TYPE_CHECKING:
    from .configuration_mt5 import MT5Config, MT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mt5 import (
            MT5EncoderModel,
            MT5ForConditionalGeneration,
            MT5ForQuestionAnswering,
            MT5Model,
            MT5PreTrainedModel,
            MT5Stack,
        )
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_mt5 import TFMT5EncoderModel, TFMT5ForConditionalGeneration, TFMT5Model
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_mt5 import FlaxMT5EncoderModel, FlaxMT5ForConditionalGeneration, FlaxMT5Model
else:
import sys
    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,
        extra_objects={"MT5Tokenizer": MT5Tokenizer, "MT5TokenizerFast": MT5TokenizerFast},
        module_spec=__spec__,
    )
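# Runtime behaviour sketch (an informal note, not part of the original module): the
# `_LazyModule` defers the heavy framework imports, so `import transformers.models.mt5`
# stays cheap until an attribute such as `MT5Model` is actually accessed, at which
# point the matching `modeling_mt5` submodule is imported on demand.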
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
UpperCAmelCase : Tuple = 1_6
UpperCAmelCase : Dict = 3_2
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Creates train/validation `DataLoader`s for GLUE MRPC using the "bert-base-cased" tokenizer."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset,
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels', which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
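# Illustrative usage sketch (assumes an `Accelerator` is already constructed; not part
# of the original script):
#
#     accelerator = Accelerator()
#     train_dl, eval_dl = get_dataloaders(accelerator, batch_size=16)
#     batch = next(iter(train_dl))  # dict of input_ids / attention_mask / labels, padded per `collate_fn`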
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`"
        )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also controls new weight initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation, otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the `accumulate` context manager to perform gradient accumulation.
            # We currently do not support TPUs with it, nor advise it, as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
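                # Note (added for clarity): inside `accelerator.accumulate`, `optimizer.step()` only
                # actually steps on every `gradient_accumulation_steps`-th batch; on the intermediate
                # batches Accelerate skips the step and the cross-process gradient sync, and
                # `accelerator.backward` scales the loss so the accumulated gradients average correctly.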
        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
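        # Rough arithmetic (added for clarity): with per-device batch_size=16,
        # gradient_accumulation_steps=4 and 2 processes, each optimizer step sees an
        # effective batch of 16 * 4 * 2 = 128 samples, at the memory cost of a single
        # 16-sample forward/backward pass per device.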
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). "
        "Bf16 requires PyTorch >= 1.10 and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches over which gradients are accumulated before an optimizer step.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
    main()
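# Example invocations (script name assumed for illustration; the flags are the ones
# defined in `main()` above):
#
#     python gradient_accumulation.py --gradient_accumulation_steps 4
#     accelerate launch gradient_accumulation.py --mixed_precision fp16 --gradient_accumulation_steps 4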
"""
Project Euler problem 25: https://projecteuler.net/problem=25

Find the index of the first term in the Fibonacci sequence to contain n digits.
"""
def fibonacci(n: int) -> int:
    """Return the n-th Fibonacci number (F(1) = F(2) = 1), or 0 for invalid input."""
    if not isinstance(n, int) or n < 1:
        return 0
    elif n in (1, 2):
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]
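# Quick sanity checks (illustrative):
#   fibonacci(7)  -> 13
#   fibonacci(12) -> 144  (the first Fibonacci number with three digits)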
def fibonacci_digits_index(n: int) -> int:
    """Return the index of the first Fibonacci number with at least n digits."""
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index
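# e.g. fibonacci_digits_index(3) -> 12, since F(12) = 144 is the first 3-digit term.
# Note that each loop iteration recomputes the sequence from scratch, so this is
# quadratic in the number of terms; fine for this problem, but caching the sequence
# across iterations would speed it up.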
def solution(n: int = 1000) -> int:
    """Return the index of the first term in the Fibonacci sequence to contain n digits."""
    return fibonacci_digits_index(n)


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
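# For reference, the published Project Euler answer is solution(1000) == 4782.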